summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2017-02-15 08:26:28 -0500
committerAnthony G. Basile <blueness@gentoo.org>2017-02-15 08:26:28 -0500
commitc5ee04267efee24744bf49ef28585f5d924bd816 (patch)
treee5b40a8f54ba0185a3f69a76bbec25a852fa570d
parentgrsecurity-3.1-4.8.17-201701151620 (diff)
downloadhardened-patchset-c5ee04267efee24744bf49ef28585f5d924bd816.tar.gz
hardened-patchset-c5ee04267efee24744bf49ef28585f5d924bd816.tar.bz2
hardened-patchset-c5ee04267efee24744bf49ef28585f5d924bd816.zip
grsecurity-3.1-4.9.9-20170212204420170212
-rw-r--r--4.8.17/1016_linux-4.8.17.patch3229
-rw-r--r--4.9.9/0000_README (renamed from 4.8.17/0000_README)10
-rw-r--r--4.9.9/1007_linux-4.9.8.patch2048
-rw-r--r--4.9.9/1008_linux-4.9.9.patch2333
-rw-r--r--4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch (renamed from 4.8.17/4420_grsecurity-3.1-4.8.17-201701151620.patch)34503
-rw-r--r--4.9.9/4425_grsec_remove_EI_PAX.patch (renamed from 4.8.17/4425_grsec_remove_EI_PAX.patch)0
-rw-r--r--4.9.9/4426_default_XATTR_PAX_FLAGS.patch (renamed from 4.8.17/4426_default_XATTR_PAX_FLAGS.patch)0
-rw-r--r--4.9.9/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.8.17/4427_force_XATTR_PAX_tmpfs.patch)8
-rw-r--r--4.9.9/4430_grsec-remove-localversion-grsec.patch (renamed from 4.8.17/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--4.9.9/4435_grsec-mute-warnings.patch (renamed from 4.8.17/4435_grsec-mute-warnings.patch)0
-rw-r--r--4.9.9/4440_grsec-remove-protected-paths.patch (renamed from 4.8.17/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--4.9.9/4450_grsec-kconfig-default-gids.patch (renamed from 4.8.17/4450_grsec-kconfig-default-gids.patch)0
-rw-r--r--4.9.9/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.8.17/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--4.9.9/4470_disable-compat_vdso.patch (renamed from 4.8.17/4470_disable-compat_vdso.patch)2
-rw-r--r--4.9.9/4475_emutramp_default_on.patch (renamed from 4.8.17/4475_emutramp_default_on.patch)0
15 files changed, 23742 insertions, 18391 deletions
diff --git a/4.8.17/1016_linux-4.8.17.patch b/4.8.17/1016_linux-4.8.17.patch
deleted file mode 100644
index 0b240d8..0000000
--- a/4.8.17/1016_linux-4.8.17.patch
+++ /dev/null
@@ -1,3229 +0,0 @@
-diff --git a/Documentation/sphinx/rstFlatTable.py b/Documentation/sphinx/rstFlatTable.py
-index 26db852..9916359 100644
---- a/Documentation/sphinx/rstFlatTable.py
-+++ b/Documentation/sphinx/rstFlatTable.py
-@@ -151,6 +151,11 @@ class ListTableBuilder(object):
- def buildTableNode(self):
-
- colwidths = self.directive.get_column_widths(self.max_cols)
-+ if isinstance(colwidths, tuple):
-+ # Since docutils 0.13, get_column_widths returns a (widths,
-+ # colwidths) tuple, where widths is a string (i.e. 'auto').
-+ # See https://sourceforge.net/p/docutils/patches/120/.
-+ colwidths = colwidths[1]
- stub_columns = self.directive.options.get('stub-columns', 0)
- header_rows = self.directive.options.get('header-rows', 0)
-
-diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
-index 739db9a..a7596e9 100644
---- a/Documentation/virtual/kvm/api.txt
-+++ b/Documentation/virtual/kvm/api.txt
-@@ -2039,6 +2039,7 @@ registers, find a list below:
- PPC | KVM_REG_PPC_TM_VSCR | 32
- PPC | KVM_REG_PPC_TM_DSCR | 64
- PPC | KVM_REG_PPC_TM_TAR | 64
-+ PPC | KVM_REG_PPC_TM_XER | 64
- | |
- MIPS | KVM_REG_MIPS_R0 | 64
- ...
-diff --git a/Makefile b/Makefile
-index 50f6864..ace32d3 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 4
- PATCHLEVEL = 8
--SUBLEVEL = 16
-+SUBLEVEL = 17
- EXTRAVERSION =
- NAME = Psychotic Stoned Sheep
-
-diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
-index a093adb..fc662f4 100644
---- a/arch/arc/include/asm/cacheflush.h
-+++ b/arch/arc/include/asm/cacheflush.h
-@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
- */
- #define PG_dc_clean PG_arch_1
-
-+#define CACHE_COLORS_NUM 4
-+#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
-+#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
-+
- /*
- * Simple wrapper over config option
- * Bootup code ensures that hardware matches kernel configuration
-@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
- return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
- }
-
--#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
--
- /*
- * checks if two addresses (after page aligning) index into same cache set
- */
-diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
-index 0b10efe..ab1aaf2 100644
---- a/arch/arc/mm/cache.c
-+++ b/arch/arc/mm/cache.c
-@@ -967,11 +967,16 @@ void arc_cache_init(void)
- /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
- if (is_isa_arcompact()) {
- int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
--
-- if (dc->alias && !handled)
-- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-- else if (!dc->alias && handled)
-+ int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
-+
-+ if (dc->alias) {
-+ if (!handled)
-+ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-+ if (CACHE_COLORS_NUM != num_colors)
-+ panic("CACHE_COLORS_NUM not optimized for config\n");
-+ } else if (!dc->alias && handled) {
- panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-+ }
- }
- }
-
-diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
-index 5fda583..906fb83 100644
---- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
-+++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
-@@ -21,6 +21,10 @@
- reg = <0x0 0x80000000 0x1 0x0>;
- };
-
-+ gpu@57000000 {
-+ vdd-supply = <&vdd_gpu>;
-+ };
-+
- /* debug port */
- serial@70006000 {
- status = "okay";
-@@ -291,4 +295,18 @@
- clock-frequency = <32768>;
- };
- };
-+
-+ regulators {
-+ vdd_gpu: regulator@100 {
-+ compatible = "pwm-regulator";
-+ reg = <100>;
-+ pwms = <&pwm 1 4880>;
-+ regulator-name = "VDD_GPU";
-+ regulator-min-microvolt = <710000>;
-+ regulator-max-microvolt = <1320000>;
-+ enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
-+ regulator-ramp-delay = <80>;
-+ regulator-enable-ramp-delay = <1000>;
-+ };
-+ };
- };
-diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
-index 5a84b45..03498c8 100644
---- a/arch/arm64/kvm/hyp/switch.c
-+++ b/arch/arm64/kvm/hyp/switch.c
-@@ -82,7 +82,13 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
- write_sysreg(val, hcr_el2);
- /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
- write_sysreg(1 << 15, hstr_el2);
-- /* Make sure we trap PMU access from EL0 to EL2 */
-+ /*
-+ * Make sure we trap PMU access from EL0 to EL2. Also sanitize
-+ * PMSELR_EL0 to make sure it never contains the cycle
-+ * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
-+ * EL1 instead of being trapped to EL2.
-+ */
-+ write_sysreg(0, pmselr_el0);
- write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
- write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- __activate_traps_arch()();
-diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
-index b6fcbaf..3dc44b0 100644
---- a/arch/powerpc/boot/ps3-head.S
-+++ b/arch/powerpc/boot/ps3-head.S
-@@ -57,11 +57,6 @@ __system_reset_overlay:
- bctr
-
- 1:
-- /* Save the value at addr zero for a null pointer write check later. */
--
-- li r4, 0
-- lwz r3, 0(r4)
--
- /* Primary delays then goes to _zimage_start in wrapper. */
-
- or 31, 31, 31 /* db16cyc */
-diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
-index 4ec2d86..a05558a 100644
---- a/arch/powerpc/boot/ps3.c
-+++ b/arch/powerpc/boot/ps3.c
-@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
- flush_cache((void *)0x100, 512);
- }
-
--void platform_init(unsigned long null_check)
-+void platform_init(void)
- {
- const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
- void *chosen;
- unsigned long ft_addr;
- u64 rm_size;
-- unsigned long val;
-
- console_ops.write = ps3_console_write;
- platform_ops.exit = ps3_exit;
-@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
-
- printf(" flat tree at 0x%lx\n\r", ft_addr);
-
-- val = *(unsigned long *)0;
--
-- if (val != null_check)
-- printf("null check failed: %lx != %lx\n\r", val, null_check);
--
- ((kernel_entry_t)0)(ft_addr, 0, NULL);
-
- ps3_exit();
-diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
-index ec35af3..f2c5dde 100644
---- a/arch/powerpc/include/asm/kvm_host.h
-+++ b/arch/powerpc/include/asm/kvm_host.h
-@@ -555,6 +555,7 @@ struct kvm_vcpu_arch {
- u64 tfiar;
-
- u32 cr_tm;
-+ u64 xer_tm;
- u64 lr_tm;
- u64 ctr_tm;
- u64 amr_tm;
-diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
-index c93cf35..0fb1326 100644
---- a/arch/powerpc/include/uapi/asm/kvm.h
-+++ b/arch/powerpc/include/uapi/asm/kvm.h
-@@ -596,6 +596,7 @@ struct kvm_get_htab_header {
- #define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
- #define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
- #define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
-+#define KVM_REG_PPC_TM_XER (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
-
- /* PPC64 eXternal Interrupt Controller Specification */
- #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
-diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
-index b89d14c..6ba221c 100644
---- a/arch/powerpc/kernel/asm-offsets.c
-+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -569,6 +569,7 @@ int main(void)
- DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
- DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
- DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
-+ DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
- DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
- DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
- DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
-diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
-index f765b04..8825789 100644
---- a/arch/powerpc/kernel/head_64.S
-+++ b/arch/powerpc/kernel/head_64.S
-@@ -201,9 +201,9 @@ booting_thread_hwid:
- */
- _GLOBAL(book3e_start_thread)
- LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
-- cmpi 0, r3, 0
-+ cmpwi r3, 0
- beq 10f
-- cmpi 0, r3, 1
-+ cmpwi r3, 1
- beq 11f
- /* If the thread id is invalid, just exit. */
- b 13f
-@@ -228,9 +228,9 @@ _GLOBAL(book3e_start_thread)
- * r3 = the thread physical id
- */
- _GLOBAL(book3e_stop_thread)
-- cmpi 0, r3, 0
-+ cmpwi r3, 0
- beq 10f
-- cmpi 0, r3, 1
-+ cmpwi r3, 1
- beq 10f
- /* If the thread id is invalid, just exit. */
- b 13f
-diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
-index 2fd5580..4c8d344 100644
---- a/arch/powerpc/kvm/book3s_hv.c
-+++ b/arch/powerpc/kvm/book3s_hv.c
-@@ -1235,6 +1235,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
- case KVM_REG_PPC_TM_CR:
- *val = get_reg_val(id, vcpu->arch.cr_tm);
- break;
-+ case KVM_REG_PPC_TM_XER:
-+ *val = get_reg_val(id, vcpu->arch.xer_tm);
-+ break;
- case KVM_REG_PPC_TM_LR:
- *val = get_reg_val(id, vcpu->arch.lr_tm);
- break;
-@@ -1442,6 +1445,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
- case KVM_REG_PPC_TM_CR:
- vcpu->arch.cr_tm = set_reg_val(id, *val);
- break;
-+ case KVM_REG_PPC_TM_XER:
-+ vcpu->arch.xer_tm = set_reg_val(id, *val);
-+ break;
- case KVM_REG_PPC_TM_LR:
- vcpu->arch.lr_tm = set_reg_val(id, *val);
- break;
-diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
-index 99b4e9d..5420d06 100644
---- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
-+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
-@@ -653,6 +653,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- HPTE_V_ABSENT);
- do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
- true);
-+ /* Don't lose R/C bit updates done by hardware */
-+ r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
- hpte[1] = cpu_to_be64(r);
- }
- }
-diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
-index 9756555..bf243a4 100644
---- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
-+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
-@@ -2579,11 +2579,13 @@ kvmppc_save_tm:
- mfctr r7
- mfspr r8, SPRN_AMR
- mfspr r10, SPRN_TAR
-+ mfxer r11
- std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
- std r7, VCPU_CTR_TM(r9)
- std r8, VCPU_AMR_TM(r9)
- std r10, VCPU_TAR_TM(r9)
-+ std r11, VCPU_XER_TM(r9)
-
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-@@ -2676,11 +2678,13 @@ kvmppc_restore_tm:
- ld r7, VCPU_CTR_TM(r4)
- ld r8, VCPU_AMR_TM(r4)
- ld r9, VCPU_TAR_TM(r4)
-+ ld r10, VCPU_XER_TM(r4)
- mtlr r5
- mtcr r6
- mtctr r7
- mtspr SPRN_AMR, r8
- mtspr SPRN_TAR, r9
-+ mtxer r10
-
- /*
- * Load up PPR and DSCR values but don't put them in the actual SPRs
-diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
-index 7f7ba5f2..d027f2e 100644
---- a/arch/s390/kernel/setup.c
-+++ b/arch/s390/kernel/setup.c
-@@ -445,7 +445,7 @@ static void __init setup_resources(void)
- * part of the System RAM resource.
- */
- if (crashk_res.end) {
-- memblock_add(crashk_res.start, resource_size(&crashk_res));
-+ memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
- memblock_reserve(crashk_res.start, resource_size(&crashk_res));
- insert_resource(&iomem_resource, &crashk_res);
- }
-diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index 0b56666..b84a349 100644
---- a/arch/x86/entry/entry_32.S
-+++ b/arch/x86/entry/entry_32.S
-@@ -852,8 +852,8 @@ ftrace_graph_call:
- jmp ftrace_stub
- #endif
-
--.globl ftrace_stub
--ftrace_stub:
-+/* This is weak to keep gas from relaxing the jumps */
-+WEAK(ftrace_stub)
- ret
- END(ftrace_caller)
-
-diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 8c925ec..7b0f1d9 100644
---- a/arch/x86/events/core.c
-+++ b/arch/x86/events/core.c
-@@ -364,7 +364,11 @@ int x86_add_exclusive(unsigned int what)
- {
- int i;
-
-- if (x86_pmu.lbr_pt_coexist)
-+ /*
-+ * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
-+ * LBR and BTS are still mutually exclusive.
-+ */
-+ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
- return 0;
-
- if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
-@@ -387,7 +391,7 @@ int x86_add_exclusive(unsigned int what)
-
- void x86_del_exclusive(unsigned int what)
- {
-- if (x86_pmu.lbr_pt_coexist)
-+ if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
- return;
-
- atomic_dec(&x86_pmu.lbr_exclusive[what]);
-diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
-index 3ca87b5..834262a 100644
---- a/arch/x86/events/intel/cstate.c
-+++ b/arch/x86/events/intel/cstate.c
-@@ -571,6 +571,9 @@ static int __init cstate_probe(const struct cstate_model *cm)
-
- static inline void cstate_cleanup(void)
- {
-+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
-+ cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
-+
- if (has_cstate_core)
- perf_pmu_unregister(&cstate_core_pmu);
-
-@@ -583,16 +586,16 @@ static int __init cstate_init(void)
- int err;
-
- cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
-- "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
-- NULL);
-+ "perf/x86/cstate:starting", cstate_cpu_init, NULL);
- cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
-- "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
-+ "perf/x86/cstate:online", NULL, cstate_cpu_exit);
-
- if (has_cstate_core) {
- err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
- if (err) {
- has_cstate_core = false;
- pr_info("Failed to register cstate core pmu\n");
-+ cstate_cleanup();
- return err;
- }
- }
-@@ -606,8 +609,7 @@ static int __init cstate_init(void)
- return err;
- }
- }
--
-- return err;
-+ return 0;
- }
-
- static int __init cstate_pmu_init(void)
-@@ -632,8 +634,6 @@ module_init(cstate_pmu_init);
-
- static void __exit cstate_pmu_exit(void)
- {
-- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
-- cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
- cstate_cleanup();
- }
- module_exit(cstate_pmu_exit);
-diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
-index 181c238..4ab002d 100644
---- a/arch/x86/events/perf_event.h
-+++ b/arch/x86/events/perf_event.h
-@@ -601,7 +601,7 @@ struct x86_pmu {
- u64 lbr_sel_mask; /* LBR_SELECT valid bits */
- const int *lbr_sel_map; /* lbr_select mappings */
- bool lbr_double_abort; /* duplicated lbr aborts */
-- bool lbr_pt_coexist; /* LBR may coexist with PT */
-+ bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
-
- /*
- * Intel PT/LBR/BTS are exclusive
-diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 5cede40..7a72db5 100644
---- a/arch/x86/kvm/vmx.c
-+++ b/arch/x86/kvm/vmx.c
-@@ -1336,10 +1336,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
- }
-
--static inline bool is_exception(u32 intr_info)
-+static inline bool is_nmi(u32 intr_info)
- {
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
-+ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
- }
-
- static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
-@@ -5467,7 +5467,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
- if (is_machine_check(intr_info))
- return handle_machine_check(vcpu);
-
-- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
-+ if (is_nmi(intr_info))
- return 1; /* already handled by vmx_vcpu_run() */
-
- if (is_no_device(intr_info)) {
-@@ -7974,7 +7974,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
-
- switch (exit_reason) {
- case EXIT_REASON_EXCEPTION_NMI:
-- if (!is_exception(intr_info))
-+ if (is_nmi(intr_info))
- return false;
- else if (is_page_fault(intr_info))
- return enable_ept;
-@@ -8572,8 +8572,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
- kvm_machine_check();
-
- /* We need to handle NMIs before interrupts are enabled */
-- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-- (exit_intr_info & INTR_INFO_VALID_MASK)) {
-+ if (is_nmi(exit_intr_info)) {
- kvm_before_handle_nmi(&vmx->vcpu);
- asm("int $2");
- kvm_after_handle_nmi(&vmx->vcpu);
-diff --git a/block/bsg.c b/block/bsg.c
-index d214e92..b9a5361 100644
---- a/block/bsg.c
-+++ b/block/bsg.c
-@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-
- dprintk("%s: write %Zd bytes\n", bd->name, count);
-
-+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
-+ return -EINVAL;
-+
- bsg_set_block(bd, file);
-
- bytes_written = 0;
-diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
-index a6b36fc..02ded25 100644
---- a/drivers/acpi/video_detect.c
-+++ b/drivers/acpi/video_detect.c
-@@ -296,6 +296,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
- },
- },
-+ {
-+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
-+ .callback = video_detect_force_native,
-+ .ident = "Dell XPS 17 L702X",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
-+ },
-+ },
-+ {
-+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-+ /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-+ .callback = video_detect_force_native,
-+ .ident = "HP Pavilion dv6",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-+ },
-+ },
-+
- { },
- };
-
-diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
-index 22d1760..a95e1e5 100644
---- a/drivers/base/firmware_class.c
-+++ b/drivers/base/firmware_class.c
-@@ -955,13 +955,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
- timeout = MAX_JIFFY_OFFSET;
- }
-
-- retval = wait_for_completion_interruptible_timeout(&buf->completion,
-+ timeout = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
-- if (retval == -ERESTARTSYS || !retval) {
-+ if (timeout == -ERESTARTSYS || !timeout) {
-+ retval = timeout;
- mutex_lock(&fw_lock);
- fw_load_abort(fw_priv);
- mutex_unlock(&fw_lock);
-- } else if (retval > 0) {
-+ } else if (timeout > 0) {
- retval = 0;
- }
-
-diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
-index 0fc71cb..3250694 100644
---- a/drivers/clk/bcm/clk-bcm2835.c
-+++ b/drivers/clk/bcm/clk-bcm2835.c
-@@ -751,7 +751,9 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
- cprman_write(cprman, data->cm_reg,
- (cprman_read(cprman, data->cm_reg) &
- ~data->load_mask) | data->hold_mask);
-- cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
-+ cprman_write(cprman, data->a2w_reg,
-+ cprman_read(cprman, data->a2w_reg) |
-+ A2W_PLL_CHANNEL_DISABLE);
- spin_unlock(&cprman->regs_lock);
- }
-
-diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index 15704aa..a8eea8a 100644
---- a/drivers/gpio/gpiolib.c
-+++ b/drivers/gpio/gpiolib.c
-@@ -984,7 +984,8 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp)
- return -ENODEV;
- get_device(&gdev->dev);
- filp->private_data = gdev;
-- return 0;
-+
-+ return nonseekable_open(inode, filp);
- }
-
- /**
-@@ -1009,7 +1010,7 @@ static const struct file_operations gpio_fileops = {
- .release = gpio_chrdev_release,
- .open = gpio_chrdev_open,
- .owner = THIS_MODULE,
-- .llseek = noop_llseek,
-+ .llseek = no_llseek,
- .unlocked_ioctl = gpio_ioctl,
- #ifdef CONFIG_COMPAT
- .compat_ioctl = gpio_ioctl_compat,
-diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-index b818461..8199232 100644
---- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
-@@ -3798,8 +3798,12 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
- temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
- data = mmRLC_SRM_INDEX_CNTL_DATA_0;
- for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
-- amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
-- amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
-+ if (unique_indices[i] != 0) {
-+ amdgpu_mm_wreg(adev, temp + i,
-+ unique_indices[i] & 0x3FFFF, false);
-+ amdgpu_mm_wreg(adev, data + i,
-+ unique_indices[i] >> 20, false);
-+ }
- }
- kfree(register_list_format);
-
-@@ -5735,29 +5739,24 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
-
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
-- /* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
-- * Cmp_busy/GFX_Idle interrupts
-- */
-- gfx_v8_0_enable_gui_idle_interrupt(adev, true);
--
- temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
- data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
- if (temp1 != data1)
- WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
-
-- /* 2 wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
-+ /* : wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
- gfx_v8_0_wait_for_rlc_serdes(adev);
-
-- /* 3 - clear cgcg override */
-+ /* 2 - clear cgcg override */
- gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
-
- /* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
- gfx_v8_0_wait_for_rlc_serdes(adev);
-
-- /* 4 - write cmd to set CGLS */
-+ /* 3 - write cmd to set CGLS */
- gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
-
-- /* 5 - enable cgcg */
-+ /* 4 - enable cgcg */
- data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
-
- if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
-@@ -5775,6 +5774,11 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
-
- if (temp != data)
- WREG32(mmRLC_CGCG_CGLS_CTRL, data);
-+
-+ /* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
-+ * Cmp_busy/GFX_Idle interrupts
-+ */
-+ gfx_v8_0_enable_gui_idle_interrupt(adev, true);
- } else {
- /* disable cntx_empty_int_enable & GFX Idle interrupt */
- gfx_v8_0_enable_gui_idle_interrupt(adev, false);
-diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
-index 904beaa..f75c642 100644
---- a/drivers/gpu/drm/ast/ast_main.c
-+++ b/drivers/gpu/drm/ast/ast_main.c
-@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
-- ;
-+ if (pci_channel_offline(dev->pdev))
-+ return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
-@@ -428,7 +429,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
- ast_detect_chip(dev, &need_post);
-
- if (ast->chip != AST1180) {
-- ast_get_dram_info(dev);
-+ ret = ast_get_dram_info(dev);
-+ if (ret)
-+ goto out_free;
- ast->vram_size = ast_get_vram_info(dev);
- DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
- }
-diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
-index 50eb944f..8f3ca52 100644
---- a/drivers/gpu/drm/gma500/psb_drv.c
-+++ b/drivers/gpu/drm/gma500/psb_drv.c
-@@ -473,6 +473,9 @@ static const struct file_operations psb_gem_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
-+#ifdef CONFIG_COMPAT
-+ .compat_ioctl = drm_compat_ioctl,
-+#endif
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
-diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
-index 2bb69f3..b386b31 100644
---- a/drivers/gpu/drm/i915/i915_gem_stolen.c
-+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
-@@ -55,10 +55,9 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
- return -ENODEV;
-
- /* See the comment at the drm_mm_init() call for more about this check.
-- * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
-+ * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
- */
-- if (start < 4096 && (IS_GEN8(dev_priv) ||
-- IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
-+ if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
- start = 4096;
-
- mutex_lock(&dev_priv->mm.stolen_lock);
-diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 35d385d..6bc93ba 100644
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -13494,8 +13494,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
-
- DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
- intel_state->cdclk, intel_state->dev_cdclk);
-- } else
-+ } else {
- to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
-+ }
-
- intel_modeset_clear_plls(state);
-
-@@ -13596,8 +13597,9 @@ static int intel_atomic_check(struct drm_device *dev,
-
- if (ret)
- return ret;
-- } else
-- intel_state->cdclk = dev_priv->cdclk_freq;
-+ } else {
-+ intel_state->cdclk = dev_priv->atomic_cdclk_freq;
-+ }
-
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
-@@ -15902,6 +15904,7 @@ void intel_modeset_init(struct drm_device *dev)
-
- intel_update_czclk(dev_priv);
- intel_update_cdclk(dev);
-+ dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
- intel_shared_dpll_init(dev);
-
-diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
-index cd154ce..3460157 100644
---- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
-+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
-@@ -296,7 +296,8 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
- mutex_lock(&dev_priv->sb_lock);
- vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
- vlv_iosf_sb_write(dev_priv, port, cfg0,
-- CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
-+ CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
-+ CHV_GPIO_GPIOTXSTATE(value));
- mutex_unlock(&dev_priv->sb_lock);
- }
-
-diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
-index 1c603bb..07a7cc0 100644
---- a/drivers/gpu/drm/i915/intel_runtime_pm.c
-+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
-@@ -1062,7 +1062,18 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
-
- static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
- {
-- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-+ u32 val;
-+
-+ /*
-+ * On driver load, a pipe may be active and driving a DSI display.
-+ * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
-+ * (and never recovering) in this case. intel_dsi_post_disable() will
-+ * clear it when we turn off the display.
-+ */
-+ val = I915_READ(DSPCLK_GATE_D);
-+ val &= DPOUNIT_CLOCK_GATE_DISABLE;
-+ val |= VRHUNIT_CLOCK_GATE_DISABLE;
-+ I915_WRITE(DSPCLK_GATE_D, val);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
-diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
-index a1570b1..23ffe85 100644
---- a/drivers/gpu/drm/nouveau/nouveau_bios.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
-@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
- if (bios->major_version < 5 && bios->data[0x48] & 0x4)
- return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
-
-+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
-+ return nvif_rd32(device, 0x001800) & 0x0000000f;
-+ else
- if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
- return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
- else
-diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
-index 864323b..fad263b 100644
---- a/drivers/gpu/drm/nouveau/nouveau_bo.c
-+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
-@@ -1209,6 +1209,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
- nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
- nvkm_vm_map(vma, new_mem->mm_node);
- } else {
-+ WARN_ON(ttm_bo_wait(bo, false, false));
- nvkm_vm_unmap(vma);
- }
- }
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
-index 7218a06..e0d7f84 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
-@@ -1851,7 +1851,7 @@ nvf1_chipset = {
- .fb = gk104_fb_new,
- .fuse = gf100_fuse_new,
- .gpio = gk104_gpio_new,
-- .i2c = gf119_i2c_new,
-+ .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
-@@ -1965,7 +1965,7 @@ nv117_chipset = {
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
-- .i2c = gf119_i2c_new,
-+ .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
-@@ -1999,7 +1999,7 @@ nv118_chipset = {
- .fb = gm107_fb_new,
- .fuse = gm107_fuse_new,
- .gpio = gk104_gpio_new,
-- .i2c = gf119_i2c_new,
-+ .i2c = gk104_i2c_new,
- .ibus = gk104_ibus_new,
- .iccsense = gf100_iccsense_new,
- .imem = nv50_instmem_new,
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
-index cbc67f2..12d96426 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogf100.c
-@@ -60,6 +60,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
- struct nvkm_gpuobj *inst = chan->base.inst;
- int ret = 0;
-
-+ mutex_lock(&subdev->mutex);
- nvkm_wr32(device, 0x002634, chan->base.chid);
- if (nvkm_msec(device, 2000,
- if (nvkm_rd32(device, 0x002634) == chan->base.chid)
-@@ -67,10 +68,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
- ) < 0) {
- nvkm_error(subdev, "channel %d [%s] kick timeout\n",
- chan->base.chid, chan->base.object.client->name);
-- ret = -EBUSY;
-- if (suspend)
-- return ret;
-+ ret = -ETIMEDOUT;
- }
-+ mutex_unlock(&subdev->mutex);
-+
-+ if (ret && suspend)
-+ return ret;
-
- if (offset) {
- nvkm_kmap(inst);
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
-index ed43510..a2df4f3 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
-@@ -40,7 +40,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
- struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
- struct nvkm_client *client = chan->base.object.client;
-+ int ret = 0;
-
-+ mutex_lock(&subdev->mutex);
- nvkm_wr32(device, 0x002634, chan->base.chid);
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
-@@ -48,10 +50,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
- ) < 0) {
- nvkm_error(subdev, "channel %d [%s] kick timeout\n",
- chan->base.chid, client->name);
-- return -EBUSY;
-+ ret = -ETIMEDOUT;
- }
--
-- return 0;
-+ mutex_unlock(&subdev->mutex);
-+ return ret;
- }
-
- static u32
-diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-index 157919c..6584d50 100644
---- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
-@@ -1756,6 +1756,50 @@ gf100_gr_ = {
- };
-
- int
-+gf100_gr_ctor_fw_legacy(struct gf100_gr *gr, const char *fwname,
-+ struct gf100_gr_fuc *fuc, int ret)
-+{
-+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
-+ struct nvkm_device *device = subdev->device;
-+ const struct firmware *fw;
-+ char f[32];
-+
-+ /* see if this firmware has a legacy path */
-+ if (!strcmp(fwname, "fecs_inst"))
-+ fwname = "fuc409c";
-+ else if (!strcmp(fwname, "fecs_data"))
-+ fwname = "fuc409d";
-+ else if (!strcmp(fwname, "gpccs_inst"))
-+ fwname = "fuc41ac";
-+ else if (!strcmp(fwname, "gpccs_data"))
-+ fwname = "fuc41ad";
-+ else {
-+ /* nope, let's just return the error we got */
-+ nvkm_error(subdev, "failed to load %s\n", fwname);
-+ return ret;
-+ }
-+
-+ /* yes, try to load from the legacy path */
-+ nvkm_debug(subdev, "%s: falling back to legacy path\n", fwname);
-+
-+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
-+ ret = request_firmware(&fw, f, device->dev);
-+ if (ret) {
-+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
-+ ret = request_firmware(&fw, f, device->dev);
-+ if (ret) {
-+ nvkm_error(subdev, "failed to load %s\n", fwname);
-+ return ret;
-+ }
-+ }
-+
-+ fuc->size = fw->size;
-+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-+ release_firmware(fw);
-+ return (fuc->data != NULL) ? 0 : -ENOMEM;
-+}
-+
-+int
- gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
- struct gf100_gr_fuc *fuc)
- {
-@@ -1765,10 +1809,8 @@ gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
- int ret;
-
- ret = nvkm_firmware_get(device, fwname, &fw);
-- if (ret) {
-- nvkm_error(subdev, "failed to load %s\n", fwname);
-- return ret;
-- }
-+ if (ret)
-+ return gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
-diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
-index 212800e..7d1d3c6 100644
---- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
-+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
-@@ -12,6 +12,7 @@ struct nvbios_source {
- bool rw;
- bool ignore_checksum;
- bool no_pcir;
-+ bool require_checksum;
- };
-
- int nvbios_extend(struct nvkm_bios *, u32 length);
-diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
-index b2557e8..7deb81b 100644
---- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
-@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
- nvbios_checksum(&bios->data[image.base], image.size)) {
- nvkm_debug(subdev, "%08x: checksum failed\n",
- image.base);
-- if (mthd->func->rw)
-+ if (!mthd->func->require_checksum) {
-+ if (mthd->func->rw)
-+ score += 1;
- score += 1;
-- score += 1;
-+ } else
-+ return 0;
- } else {
- score += 3;
- }
-diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
-index 8fecb5f..06572f8 100644
---- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
-@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
- .init = acpi_init,
- .read = acpi_read_fast,
- .rw = false,
-+ .require_checksum = true,
- };
-
- const struct nvbios_source
-diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
-index 39c2a38..0c7ef25 100644
---- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
-+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
-@@ -47,8 +47,10 @@ nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)
-
- BUG_ON((first > limit) || (limit >= ltc->num_tags));
-
-+ mutex_lock(&ltc->subdev.mutex);
- ltc->func->cbc_clear(ltc, first, limit);
- ltc->func->cbc_wait(ltc);
-+ mutex_unlock(&ltc->subdev.mutex);
- }
-
- int
-diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
-index 2a10e24..87a7247 100644
---- a/drivers/gpu/drm/radeon/radeon_cursor.c
-+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
-@@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
-
-+ if (radeon_crtc->cursor_out_of_bounds)
-+ return;
-+
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(radeon_crtc->cursor_addr));
-@@ -148,16 +151,17 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
- x += crtc->x;
- y += crtc->y;
- }
-- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
-- if (x < 0) {
-+ if (x < 0)
- xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
-- x = 0;
-- }
-- if (y < 0) {
-+ if (y < 0)
- yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
-- y = 0;
-+
-+ if (!ASIC_IS_AVIVO(rdev)) {
-+ x += crtc->x;
-+ y += crtc->y;
- }
-+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- /* fixed on DCE6 and newer */
- if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
-@@ -180,27 +184,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
- if (i > 1) {
- int cursor_end, frame_end;
-
-- cursor_end = x - xorigin + w;
-+ cursor_end = x + w;
- frame_end = crtc->x + crtc->mode.crtc_hdisplay;
- if (cursor_end >= frame_end) {
- w = w - (cursor_end - frame_end);
- if (!(frame_end & 0x7f))
- w--;
-- } else {
-- if (!(cursor_end & 0x7f))
-- w--;
-+ } else if (cursor_end <= 0) {
-+ goto out_of_bounds;
-+ } else if (!(cursor_end & 0x7f)) {
-+ w--;
- }
- if (w <= 0) {
-- w = 1;
-- cursor_end = x - xorigin + w;
-- if (!(cursor_end & 0x7f)) {
-- x--;
-- WARN_ON_ONCE(x < 0);
-- }
-+ goto out_of_bounds;
- }
- }
- }
-
-+ if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
-+ x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
-+ y >= (crtc->y + crtc->mode.crtc_vdisplay))
-+ goto out_of_bounds;
-+
-+ x += xorigin;
-+ y += yorigin;
-+
- if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
- WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
-@@ -212,6 +220,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
- WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
- ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
- } else {
-+ x -= crtc->x;
-+ y -= crtc->y;
-+
- if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
- y *= 2;
-
-@@ -232,6 +243,19 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
- radeon_crtc->cursor_x = x;
- radeon_crtc->cursor_y = y;
-
-+ if (radeon_crtc->cursor_out_of_bounds) {
-+ radeon_crtc->cursor_out_of_bounds = false;
-+ if (radeon_crtc->cursor_bo)
-+ radeon_show_cursor(crtc);
-+ }
-+
-+ return 0;
-+
-+ out_of_bounds:
-+ if (!radeon_crtc->cursor_out_of_bounds) {
-+ radeon_hide_cursor(crtc);
-+ radeon_crtc->cursor_out_of_bounds = true;
-+ }
- return 0;
- }
-
-@@ -297,22 +321,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
- return ret;
- }
-
-- radeon_crtc->cursor_width = width;
-- radeon_crtc->cursor_height = height;
--
- radeon_lock_cursor(crtc, true);
-
-- if (hot_x != radeon_crtc->cursor_hot_x ||
-+ if (width != radeon_crtc->cursor_width ||
-+ height != radeon_crtc->cursor_height ||
-+ hot_x != radeon_crtc->cursor_hot_x ||
- hot_y != radeon_crtc->cursor_hot_y) {
- int x, y;
-
- x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
- y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
-
-- radeon_cursor_move_locked(crtc, x, y);
--
-+ radeon_crtc->cursor_width = width;
-+ radeon_crtc->cursor_height = height;
- radeon_crtc->cursor_hot_x = hot_x;
- radeon_crtc->cursor_hot_y = hot_y;
-+
-+ radeon_cursor_move_locked(crtc, x, y);
- }
-
- radeon_show_cursor(crtc);
-diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
-index bb75201a..f1da484 100644
---- a/drivers/gpu/drm/radeon/radeon_mode.h
-+++ b/drivers/gpu/drm/radeon/radeon_mode.h
-@@ -330,6 +330,7 @@ struct radeon_crtc {
- u16 lut_r[256], lut_g[256], lut_b[256];
- bool enabled;
- bool can_tile;
-+ bool cursor_out_of_bounds;
- uint32_t crtc_offset;
- struct drm_gem_object *cursor_bo;
- uint64_t cursor_addr;
-diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
-index 2523ca9..2297ec7 100644
---- a/drivers/gpu/drm/radeon/si.c
-+++ b/drivers/gpu/drm/radeon/si.c
-@@ -1722,6 +1722,7 @@ static int si_init_microcode(struct radeon_device *rdev)
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
-+ (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605))
- new_smc = true;
-diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
-index c4993452..8b5e697 100644
---- a/drivers/gpu/drm/radeon/si_dpm.c
-+++ b/drivers/gpu/drm/radeon/si_dpm.c
-@@ -3026,6 +3026,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
-+ (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605)) {
- max_sclk = 75000;
-diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
-index b6c1211..0450852 100644
---- a/drivers/hv/channel_mgmt.c
-+++ b/drivers/hv/channel_mgmt.c
-@@ -348,6 +348,7 @@ void vmbus_free_channels(void)
- {
- struct vmbus_channel *channel, *tmp;
-
-+ mutex_lock(&vmbus_connection.channel_mutex);
- list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
- listentry) {
- /* hv_process_channel_removal() needs this */
-@@ -355,6 +356,7 @@ void vmbus_free_channels(void)
-
- vmbus_device_unregister(channel->device_obj);
- }
-+ mutex_unlock(&vmbus_connection.channel_mutex);
- }
-
- /*
-diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
-index 51f81d6..a6ea387 100644
---- a/drivers/hwtracing/stm/core.c
-+++ b/drivers/hwtracing/stm/core.c
-@@ -361,7 +361,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
- struct stm_file *stmf;
- struct device *dev;
- unsigned int major = imajor(inode);
-- int err = -ENODEV;
-+ int err = -ENOMEM;
-
- dev = class_find_device(&stm_class, NULL, &major, major_match);
- if (!dev)
-@@ -369,8 +369,9 @@ static int stm_char_open(struct inode *inode, struct file *file)
-
- stmf = kzalloc(sizeof(*stmf), GFP_KERNEL);
- if (!stmf)
-- return -ENOMEM;
-+ goto err_put_device;
-
-+ err = -ENODEV;
- stm_output_init(&stmf->output);
- stmf->stm = to_stm_device(dev);
-
-@@ -382,9 +383,10 @@ static int stm_char_open(struct inode *inode, struct file *file)
- return nonseekable_open(inode, file);
-
- err_free:
-+ kfree(stmf);
-+err_put_device:
- /* matches class_find_device() above */
- put_device(dev);
-- kfree(stmf);
-
- return err;
- }
-diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
-index 2d49228..85b2bfe 100644
---- a/drivers/infiniband/core/mad.c
-+++ b/drivers/infiniband/core/mad.c
-@@ -1746,7 +1746,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
- if (!class)
- goto out;
- if (convert_mgmt_class(mad_hdr->mgmt_class) >=
-- IB_MGMT_MAX_METHODS)
-+ ARRAY_SIZE(class->method_table))
- goto out;
- method = class->method_table[convert_mgmt_class(
- mad_hdr->mgmt_class)];
-diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
-index 51c79b2..45523cf 100644
---- a/drivers/infiniband/core/multicast.c
-+++ b/drivers/infiniband/core/multicast.c
-@@ -518,8 +518,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
- process_join_error(group, status);
- else {
- int mgids_changed, is_mgid0;
-- ib_find_pkey(group->port->dev->device, group->port->port_num,
-- be16_to_cpu(rec->pkey), &pkey_index);
-+
-+ if (ib_find_pkey(group->port->dev->device,
-+ group->port->port_num, be16_to_cpu(rec->pkey),
-+ &pkey_index))
-+ pkey_index = MCAST_INVALID_PKEY_INDEX;
-
- spin_lock_irq(&group->port->lock);
- if (group->state == MCAST_BUSY &&
-diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
-index 6329c97..4b892ca 100644
---- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
-+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
-@@ -2501,7 +2501,7 @@ static int i40iw_get_hw_stats(struct ib_device *ibdev,
- return -ENOSYS;
- }
-
-- memcpy(&stats->value[0], &hw_stats, sizeof(*hw_stats));
-+ memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
-
- return stats->num_counters;
- }
-diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
-index f724a7e..979e445 100644
---- a/drivers/infiniband/sw/rxe/rxe_qp.c
-+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
-@@ -850,4 +850,5 @@ void rxe_qp_cleanup(void *arg)
- free_rd_atomic_resources(qp);
-
- kernel_sock_shutdown(qp->sk, SHUT_RDWR);
-+ sock_release(qp->sk);
- }
-diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-index 1909dd2..fddff40 100644
---- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -575,8 +575,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
- if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
- return;
-
-- if (ib_query_port(priv->ca, priv->port, &port_attr) ||
-- port_attr.state != IB_PORT_ACTIVE) {
-+ if (ib_query_port(priv->ca, priv->port, &port_attr)) {
-+ ipoib_dbg(priv, "ib_query_port() failed\n");
-+ return;
-+ }
-+ if (port_attr.state != IB_PORT_ACTIVE) {
- ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
- port_attr.state);
- return;
-diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
-index 2adfd86c..930424e 100644
---- a/drivers/input/misc/drv260x.c
-+++ b/drivers/input/misc/drv260x.c
-@@ -592,7 +592,6 @@ static int drv260x_probe(struct i2c_client *client,
- }
-
- haptics->input_dev->name = "drv260x:haptics";
-- haptics->input_dev->dev.parent = client->dev.parent;
- haptics->input_dev->close = drv260x_close;
- input_set_drvdata(haptics->input_dev, haptics);
- input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
-diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index ee7fc37..a87549b 100644
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -7017,6 +7017,15 @@ static int raid5_run(struct mddev *mddev)
- stripe = (stripe | (stripe-1)) + 1;
- mddev->queue->limits.discard_alignment = stripe;
- mddev->queue->limits.discard_granularity = stripe;
-+
-+ /*
-+ * We use 16-bit counter of active stripes in bi_phys_segments
-+ * (minus one for over-loaded initialization)
-+ */
-+ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
-+ blk_queue_max_discard_sectors(mddev->queue,
-+ 0xfffe * STRIPE_SECTORS);
-+
- /*
- * unaligned part of discard request will be ignored, so can't
- * guarantee discard_zeroes_data
-diff --git a/drivers/media/dvb-frontends/mn88472.c b/drivers/media/dvb-frontends/mn88472.c
-index 18fb2df..7265011 100644
---- a/drivers/media/dvb-frontends/mn88472.c
-+++ b/drivers/media/dvb-frontends/mn88472.c
-@@ -488,18 +488,6 @@ static int mn88472_probe(struct i2c_client *client,
- goto err_kfree;
- }
-
-- /* Check demod answers with correct chip id */
-- ret = regmap_read(dev->regmap[0], 0xff, &utmp);
-- if (ret)
-- goto err_regmap_0_regmap_exit;
--
-- dev_dbg(&client->dev, "chip id=%02x\n", utmp);
--
-- if (utmp != 0x02) {
-- ret = -ENODEV;
-- goto err_regmap_0_regmap_exit;
-- }
--
- /*
- * Chip has three I2C addresses for different register banks. Used
- * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
-@@ -536,6 +524,18 @@ static int mn88472_probe(struct i2c_client *client,
- }
- i2c_set_clientdata(dev->client[2], dev);
-
-+ /* Check demod answers with correct chip id */
-+ ret = regmap_read(dev->regmap[2], 0xff, &utmp);
-+ if (ret)
-+ goto err_regmap_2_regmap_exit;
-+
-+ dev_dbg(&client->dev, "chip id=%02x\n", utmp);
-+
-+ if (utmp != 0x02) {
-+ ret = -ENODEV;
-+ goto err_regmap_2_regmap_exit;
-+ }
-+
- /* Sleep because chip is active by default */
- ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
- if (ret)
-diff --git a/drivers/media/dvb-frontends/mn88473.c b/drivers/media/dvb-frontends/mn88473.c
-index 451974a..2932bdc 100644
---- a/drivers/media/dvb-frontends/mn88473.c
-+++ b/drivers/media/dvb-frontends/mn88473.c
-@@ -485,18 +485,6 @@ static int mn88473_probe(struct i2c_client *client,
- goto err_kfree;
- }
-
-- /* Check demod answers with correct chip id */
-- ret = regmap_read(dev->regmap[0], 0xff, &uitmp);
-- if (ret)
-- goto err_regmap_0_regmap_exit;
--
-- dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
--
-- if (uitmp != 0x03) {
-- ret = -ENODEV;
-- goto err_regmap_0_regmap_exit;
-- }
--
- /*
- * Chip has three I2C addresses for different register banks. Used
- * addresses are 0x18, 0x1a and 0x1c. We register two dummy clients,
-@@ -533,6 +521,18 @@ static int mn88473_probe(struct i2c_client *client,
- }
- i2c_set_clientdata(dev->client[2], dev);
-
-+ /* Check demod answers with correct chip id */
-+ ret = regmap_read(dev->regmap[2], 0xff, &uitmp);
-+ if (ret)
-+ goto err_regmap_2_regmap_exit;
-+
-+ dev_dbg(&client->dev, "chip id=%02x\n", uitmp);
-+
-+ if (uitmp != 0x03) {
-+ ret = -ENODEV;
-+ goto err_regmap_2_regmap_exit;
-+ }
-+
- /* Sleep because chip is active by default */
- ret = regmap_write(dev->regmap[2], 0x05, 0x3e);
- if (ret)
-diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
-index 0b6d46c..02a8f51 100644
---- a/drivers/media/i2c/tvp5150.c
-+++ b/drivers/media/i2c/tvp5150.c
-@@ -815,6 +815,7 @@ static int tvp5150_s_ctrl(struct v4l2_ctrl *ctrl)
- return 0;
- case V4L2_CID_HUE:
- tvp5150_write(sd, TVP5150_HUE_CTL, ctrl->val);
-+ break;
- case V4L2_CID_TEST_PATTERN:
- decoder->enable = ctrl->val ? false : true;
- tvp5150_selmux(sd);
-diff --git a/drivers/media/pci/solo6x10/solo6x10.h b/drivers/media/pci/solo6x10/solo6x10.h
-index 5bd4987..3f8da5e 100644
---- a/drivers/media/pci/solo6x10/solo6x10.h
-+++ b/drivers/media/pci/solo6x10/solo6x10.h
-@@ -284,7 +284,10 @@ static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
- static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
- u32 data)
- {
-+ u16 val;
-+
- writel(data, solo_dev->reg_base + reg);
-+ pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
- }
-
- static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
-diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
-index e3f104f..9e88c2f 100644
---- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
-+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
-@@ -1073,6 +1073,7 @@ static struct device *s5p_mfc_alloc_memdev(struct device *dev,
- idx);
- if (ret == 0)
- return child;
-+ device_del(child);
- }
-
- put_device(child);
-diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
-index 641c1a5..75be3d5 100644
---- a/drivers/misc/mei/client.c
-+++ b/drivers/misc/mei/client.c
-@@ -675,7 +675,7 @@ void mei_host_client_init(struct mei_device *dev)
-
- pm_runtime_mark_last_busy(dev->dev);
- dev_dbg(dev->dev, "rpm: autosuspend\n");
-- pm_runtime_autosuspend(dev->dev);
-+ pm_request_autosuspend(dev->dev);
- }
-
- /**
-diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
-index 7ad15d6..c8307e8 100644
---- a/drivers/misc/mei/hw-me-regs.h
-+++ b/drivers/misc/mei/hw-me-regs.h
-@@ -122,6 +122,8 @@
- #define MEI_DEV_ID_SPT_H 0xA13A /* Sunrise Point H */
- #define MEI_DEV_ID_SPT_H_2 0xA13B /* Sunrise Point H 2 */
-
-+#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */
-+
- #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
- #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
-
-diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
-index 5eb9b75..69fca9b 100644
---- a/drivers/misc/mei/pci-me.c
-+++ b/drivers/misc/mei/pci-me.c
-@@ -87,6 +87,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
-+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
-
- {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
-diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
-index 6eb8f07..1f6205c 100644
---- a/drivers/mmc/host/sdhci.c
-+++ b/drivers/mmc/host/sdhci.c
-@@ -2074,7 +2074,27 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
- ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
- sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
-
-+ sdhci_do_reset(host, SDHCI_RESET_CMD);
-+ sdhci_do_reset(host, SDHCI_RESET_DATA);
-+
- err = -EIO;
-+
-+ if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
-+ goto out;
-+
-+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
-+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
-+
-+ spin_unlock_irqrestore(&host->lock, flags);
-+
-+ memset(&cmd, 0, sizeof(cmd));
-+ cmd.opcode = MMC_STOP_TRANSMISSION;
-+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-+ cmd.busy_timeout = 50;
-+ mmc_wait_for_cmd(mmc, &cmd, 0);
-+
-+ spin_lock_irqsave(&host->lock, flags);
-+
- goto out;
- }
-
-diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
-index 60227a3..5588c56 100644
---- a/drivers/net/ethernet/marvell/mvpp2.c
-+++ b/drivers/net/ethernet/marvell/mvpp2.c
-@@ -770,6 +770,17 @@ struct mvpp2_rx_desc {
- u32 reserved8;
- };
-
-+struct mvpp2_txq_pcpu_buf {
-+ /* Transmitted SKB */
-+ struct sk_buff *skb;
-+
-+ /* Physical address of transmitted buffer */
-+ dma_addr_t phys;
-+
-+ /* Size transmitted */
-+ size_t size;
-+};
-+
- /* Per-CPU Tx queue control */
- struct mvpp2_txq_pcpu {
- int cpu;
-@@ -785,11 +796,8 @@ struct mvpp2_txq_pcpu {
- /* Number of Tx DMA descriptors reserved for each CPU */
- int reserved_num;
-
-- /* Array of transmitted skb */
-- struct sk_buff **tx_skb;
--
-- /* Array of transmitted buffers' physical addresses */
-- dma_addr_t *tx_buffs;
-+ /* Infos about transmitted buffers */
-+ struct mvpp2_txq_pcpu_buf *buffs;
-
- /* Index of last TX DMA descriptor that was inserted */
- int txq_put_index;
-@@ -979,10 +987,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
- struct sk_buff *skb,
- struct mvpp2_tx_desc *tx_desc)
- {
-- txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
-- if (skb)
-- txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
-- tx_desc->buf_phys_addr;
-+ struct mvpp2_txq_pcpu_buf *tx_buf =
-+ txq_pcpu->buffs + txq_pcpu->txq_put_index;
-+ tx_buf->skb = skb;
-+ tx_buf->size = tx_desc->data_size;
-+ tx_buf->phys = tx_desc->buf_phys_addr;
- txq_pcpu->txq_put_index++;
- if (txq_pcpu->txq_put_index == txq_pcpu->size)
- txq_pcpu->txq_put_index = 0;
-@@ -4401,17 +4410,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
- int i;
-
- for (i = 0; i < num; i++) {
-- dma_addr_t buf_phys_addr =
-- txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
-- struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
-+ struct mvpp2_txq_pcpu_buf *tx_buf =
-+ txq_pcpu->buffs + txq_pcpu->txq_get_index;
-
- mvpp2_txq_inc_get(txq_pcpu);
-
-- dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
-- skb_headlen(skb), DMA_TO_DEVICE);
-- if (!skb)
-+ dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
-+ tx_buf->size, DMA_TO_DEVICE);
-+ if (!tx_buf->skb)
- continue;
-- dev_kfree_skb_any(skb);
-+ dev_kfree_skb_any(tx_buf->skb);
- }
- }
-
-@@ -4651,15 +4659,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- txq_pcpu->size = txq->size;
-- txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
-- sizeof(*txq_pcpu->tx_skb),
-- GFP_KERNEL);
-- if (!txq_pcpu->tx_skb)
-- goto error;
--
-- txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
-- sizeof(dma_addr_t), GFP_KERNEL);
-- if (!txq_pcpu->tx_buffs)
-+ txq_pcpu->buffs = kmalloc(txq_pcpu->size *
-+ sizeof(struct mvpp2_txq_pcpu_buf),
-+ GFP_KERNEL);
-+ if (!txq_pcpu->buffs)
- goto error;
-
- txq_pcpu->count = 0;
-@@ -4673,8 +4676,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
- error:
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-- kfree(txq_pcpu->tx_skb);
-- kfree(txq_pcpu->tx_buffs);
-+ kfree(txq_pcpu->buffs);
- }
-
- dma_free_coherent(port->dev->dev.parent,
-@@ -4693,8 +4695,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
-
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-- kfree(txq_pcpu->tx_skb);
-- kfree(txq_pcpu->tx_buffs);
-+ kfree(txq_pcpu->buffs);
- }
-
- if (txq->descs)
-diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
-index 14b13f0..a35f78b 100644
---- a/drivers/net/wireless/ath/ath9k/hw.c
-+++ b/drivers/net/wireless/ath/ath9k/hw.c
-@@ -2792,7 +2792,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
- WARN_ON(1);
- }
-
-- return val;
-+ return !!val;
- }
- EXPORT_SYMBOL(ath9k_hw_gpio_get);
-
-diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
-index 0dd454a..aff473d 100644
---- a/drivers/net/wireless/ath/ath9k/pci.c
-+++ b/drivers/net/wireless/ath/ath9k/pci.c
-@@ -26,7 +26,6 @@ static const struct pci_device_id ath_pci_id_table[] = {
- { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
- { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
- { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
-- { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
-
- #ifdef CONFIG_ATH9K_PCOEM
- /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
-@@ -37,7 +36,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
- .driver_data = ATH9K_PCI_LED_ACT_HI },
- #endif
-
-- { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
-+ { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
-
- #ifdef CONFIG_ATH9K_PCOEM
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
-@@ -85,7 +84,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
- 0x10CF, /* Fujitsu */
- 0x1536),
- .driver_data = ATH9K_PCI_D3_L1_WAR },
-+#endif
-
-+ { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
-+
-+#ifdef CONFIG_ATH9K_PCOEM
- /* AR9285 card for Asus */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
- 0x002B,
-diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
-index c6b246a..4b78079 100644
---- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
-+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
-@@ -4401,6 +4401,13 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
- void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
- {
-+#ifdef RTL8XXXU_GEN2_REPORT_CONNECT
-+ /*
-+ * Barry Day reports this causes issues with 8192eu and 8723bu
-+ * devices reconnecting. The reason for this is unclear, but
-+ * until it is better understood, leave the code in place but
-+ * disabled, so it is not lost.
-+ */
- struct h2c_cmd h2c;
-
- memset(&h2c, 0, sizeof(struct h2c_cmd));
-@@ -4412,6 +4419,7 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
- h2c.media_status_rpt.parm &= ~BIT(0);
-
- rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
-+#endif
- }
-
- void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
-diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
-index 264466f..4ac928b 100644
---- a/drivers/net/wireless/realtek/rtlwifi/base.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
-@@ -1303,12 +1303,13 @@ EXPORT_SYMBOL_GPL(rtl_action_proc);
-
- static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
- {
-+ struct ieee80211_hw *hw = rtlpriv->hw;
-+
- rtlpriv->ra.is_special_data = true;
- if (rtlpriv->cfg->ops->get_btc_status())
- rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
- rtlpriv, 1);
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-+ rtl_lps_leave(hw);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
-
-@@ -1381,8 +1382,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
-
- if (is_tx) {
- rtlpriv->ra.is_special_data = true;
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-+ rtl_lps_leave(hw);
- ppsc->last_delaylps_stamp_jiffies = jiffies;
- }
-
-diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
-index 41f77f8..8f783ef 100644
---- a/drivers/net/wireless/realtek/rtlwifi/core.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
-@@ -1149,10 +1149,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
- } else {
- mstatus = RT_MEDIA_DISCONNECT;
-
-- if (mac->link_state == MAC80211_LINKED) {
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-- }
-+ if (mac->link_state == MAC80211_LINKED)
-+ rtl_lps_leave(hw);
- if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
- rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
- mac->link_state = MAC80211_NOLINK;
-@@ -1430,8 +1428,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
- }
-
- if (mac->link_state == MAC80211_LINKED) {
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-+ rtl_lps_leave(hw);
- mac->link_state = MAC80211_LINKED_SCANNING;
- } else {
- rtl_ips_nic_on(hw);
-diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
-index d12586d..e538e23 100644
---- a/drivers/net/wireless/realtek/rtlwifi/pci.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
-@@ -662,11 +662,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
- }
-
- if (((rtlpriv->link_info.num_rx_inperiod +
-- rtlpriv->link_info.num_tx_inperiod) > 8) ||
-- (rtlpriv->link_info.num_rx_inperiod > 2)) {
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-- }
-+ rtlpriv->link_info.num_tx_inperiod) > 8) ||
-+ (rtlpriv->link_info.num_rx_inperiod > 2))
-+ rtl_lps_leave(hw);
- }
-
- static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
-@@ -917,10 +915,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
- }
- if (((rtlpriv->link_info.num_rx_inperiod +
- rtlpriv->link_info.num_tx_inperiod) > 8) ||
-- (rtlpriv->link_info.num_rx_inperiod > 2)) {
-- rtlpriv->enter_ps = false;
-- schedule_work(&rtlpriv->works.lps_change_work);
-- }
-+ (rtlpriv->link_info.num_rx_inperiod > 2))
-+ rtl_lps_leave(hw);
- skb = new_skb;
- no_new:
- if (rtlpriv->use_new_trx_flow) {
-diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
-index 9a64f9b..424e262 100644
---- a/drivers/net/wireless/realtek/rtlwifi/ps.c
-+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
-@@ -407,8 +407,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
- }
- }
-
--/*Enter the leisure power save mode.*/
--void rtl_lps_enter(struct ieee80211_hw *hw)
-+/* Interrupt safe routine to enter the leisure power save mode.*/
-+static void rtl_lps_enter_core(struct ieee80211_hw *hw)
- {
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-@@ -444,10 +444,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
-
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
- }
--EXPORT_SYMBOL(rtl_lps_enter);
-
--/*Leave the leisure power save mode.*/
--void rtl_lps_leave(struct ieee80211_hw *hw)
-+/* Interrupt safe routine to leave the leisure power save mode.*/
-+static void rtl_lps_leave_core(struct ieee80211_hw *hw)
- {
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-@@ -477,7 +476,6 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
- }
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
- }
--EXPORT_SYMBOL(rtl_lps_leave);
-
- /* For sw LPS*/
- void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
-@@ -670,12 +668,34 @@ void rtl_lps_change_work_callback(struct work_struct *work)
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
-- rtl_lps_enter(hw);
-+ rtl_lps_enter_core(hw);
- else
-- rtl_lps_leave(hw);
-+ rtl_lps_leave_core(hw);
- }
- EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
-
-+void rtl_lps_enter(struct ieee80211_hw *hw)
-+{
-+ struct rtl_priv *rtlpriv = rtl_priv(hw);
-+
-+ if (!in_interrupt())
-+ return rtl_lps_enter_core(hw);
-+ rtlpriv->enter_ps = true;
-+ schedule_work(&rtlpriv->works.lps_change_work);
-+}
-+EXPORT_SYMBOL_GPL(rtl_lps_enter);
-+
-+void rtl_lps_leave(struct ieee80211_hw *hw)
-+{
-+ struct rtl_priv *rtlpriv = rtl_priv(hw);
-+
-+ if (!in_interrupt())
-+ return rtl_lps_leave_core(hw);
-+ rtlpriv->enter_ps = false;
-+ schedule_work(&rtlpriv->works.lps_change_work);
-+}
-+EXPORT_SYMBOL_GPL(rtl_lps_leave);
-+
- void rtl_swlps_wq_callback(void *data)
- {
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
-diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
-index cea8350..a2ac9e6 100644
---- a/drivers/nvdimm/pfn_devs.c
-+++ b/drivers/nvdimm/pfn_devs.c
-@@ -108,7 +108,7 @@ static ssize_t align_show(struct device *dev,
- {
- struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
-
-- return sprintf(buf, "%lx\n", nd_pfn->align);
-+ return sprintf(buf, "%ld\n", nd_pfn->align);
- }
-
- static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
-diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
-index aab9d51..24db77e 100644
---- a/drivers/pci/pci.c
-+++ b/drivers/pci/pci.c
-@@ -2064,6 +2064,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
- if (!dev->pme_support)
- return false;
-
-+ /* PME-capable in principle, but not from the intended sleep state */
-+ if (!pci_pme_capable(dev, pci_target_state(dev)))
-+ return false;
-+
- while (bus->parent) {
- struct pci_dev *bridge = bus->self;
-
-diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
-index adecc1c..c4ed3e5 100644
---- a/drivers/platform/x86/asus-nb-wmi.c
-+++ b/drivers/platform/x86/asus-nb-wmi.c
-@@ -137,6 +137,15 @@ static const struct dmi_system_id asus_quirks[] = {
- },
- {
- .callback = dmi_matched,
-+ .ident = "ASUSTeK COMPUTER INC. X45U",
-+ .matches = {
-+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-+ DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
-+ },
-+ .driver_data = &quirk_asus_wapf4,
-+ },
-+ {
-+ .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. X456UA",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
-index 7d2ae3e..342f5da 100644
---- a/drivers/regulator/stw481x-vmmc.c
-+++ b/drivers/regulator/stw481x-vmmc.c
-@@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = {
- .volt_table = stw481x_vmmc_voltages,
- .enable_time = 200, /* FIXME: look this up */
- .enable_reg = STW_CONF1,
-- .enable_mask = STW_CONF1_PDN_VMMC,
-+ .enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS,
-+ .enable_val = STW_CONF1_PDN_VMMC,
- .vsel_reg = STW_CONF1,
- .vsel_mask = STW_CONF1_VMMC_MASK,
- };
-diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
-index e883063..3167e85 100644
---- a/drivers/s390/char/vmlogrdr.c
-+++ b/drivers/s390/char/vmlogrdr.c
-@@ -870,7 +870,7 @@ static int __init vmlogrdr_init(void)
- goto cleanup;
-
- for (i=0; i < MAXMINOR; ++i ) {
-- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
-+ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sys_ser[i].buffer) {
- rc = -ENOMEM;
- break;
-diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
-index 5810019..d5bf36e 100644
---- a/drivers/s390/scsi/zfcp_dbf.c
-+++ b/drivers/s390/scsi/zfcp_dbf.c
-@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
-
-
- /**
-- * zfcp_dbf_rec_run - trace event related to running recovery
-+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
-+ * @level: trace level to be used for event
- * @tag: identifier for event
- * @erp: erp_action running
- */
--void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
-+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
- {
- struct zfcp_dbf *dbf = erp->adapter->dbf;
- struct zfcp_dbf_rec *rec = &dbf->rec_buf;
-@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
- else
- rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
-
-- debug_event(dbf->rec, 1, rec, sizeof(*rec));
-+ debug_event(dbf->rec, level, rec, sizeof(*rec));
- spin_unlock_irqrestore(&dbf->rec_lock, flags);
- }
-
- /**
-+ * zfcp_dbf_rec_run - trace event related to running recovery
-+ * @tag: identifier for event
-+ * @erp: erp_action running
-+ */
-+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
-+{
-+ zfcp_dbf_rec_run_lvl(1, tag, erp);
-+}
-+
-+/**
- * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
- * @tag: identifier for event
- * @wka_port: well known address port
-diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
-index 36d0758..db186d4 100644
---- a/drivers/s390/scsi/zfcp_dbf.h
-+++ b/drivers/s390/scsi/zfcp_dbf.h
-@@ -2,7 +2,7 @@
- * zfcp device driver
- * debug feature declarations
- *
-- * Copyright IBM Corp. 2008, 2015
-+ * Copyright IBM Corp. 2008, 2016
- */
-
- #ifndef ZFCP_DBF_H
-@@ -283,6 +283,30 @@ struct zfcp_dbf {
- struct zfcp_dbf_scsi scsi_buf;
- };
-
-+/**
-+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
-+ * @req: request that has been completed
-+ *
-+ * Returns true if FCP response with only benign residual under count.
-+ */
-+static inline
-+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
-+{
-+ struct fsf_qtcb *qtcb = req->qtcb;
-+ u32 fsf_stat = qtcb->header.fsf_status;
-+ struct fcp_resp *fcp_rsp;
-+ u8 rsp_flags, fr_status;
-+
-+ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
-+ return false; /* not an FCP response */
-+ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
-+ rsp_flags = fcp_rsp->fr_flags;
-+ fr_status = fcp_rsp->fr_status;
-+ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
-+ (rsp_flags == FCP_RESID_UNDER) &&
-+ (fr_status == SAM_STAT_GOOD);
-+}
-+
- static inline
- void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
- {
-@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
- zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
-
- } else if (qtcb->header.fsf_status != FSF_GOOD) {
-- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
-+ zfcp_dbf_hba_fsf_resp("fs_ferr",
-+ zfcp_dbf_hba_fsf_resp_suppress(req)
-+ ? 5 : 1, req);
-
- } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
- (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
- }
-
-+/**
-+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
-+ * @scmnd: SCSI command that was NULLified.
-+ * @fsf_req: request that owned @scmnd.
-+ */
-+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
-+ struct zfcp_fsf_req *fsf_req)
-+{
-+ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
-+}
-+
- #endif /* ZFCP_DBF_H */
-diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
-index a59d678..7ccfce5 100644
---- a/drivers/s390/scsi/zfcp_erp.c
-+++ b/drivers/s390/scsi/zfcp_erp.c
-@@ -3,7 +3,7 @@
- *
- * Error Recovery Procedures (ERP).
- *
-- * Copyright IBM Corp. 2002, 2015
-+ * Copyright IBM Corp. 2002, 2016
- */
-
- #define KMSG_COMPONENT "zfcp"
-@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
- }
- }
-
-+/**
-+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
-+ * @port: zfcp_port whose fc_rport we should try to unblock
-+ */
-+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
-+{
-+ unsigned long flags;
-+ struct zfcp_adapter *adapter = port->adapter;
-+ int port_status;
-+ struct Scsi_Host *shost = adapter->scsi_host;
-+ struct scsi_device *sdev;
-+
-+ write_lock_irqsave(&adapter->erp_lock, flags);
-+ port_status = atomic_read(&port->status);
-+ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
-+ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
-+ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
-+ /* new ERP of severity >= port triggered elsewhere meanwhile or
-+ * local link down (adapter erp_failed but not clear unblock)
-+ */
-+ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
-+ write_unlock_irqrestore(&adapter->erp_lock, flags);
-+ return;
-+ }
-+ spin_lock(shost->host_lock);
-+ __shost_for_each_device(sdev, shost) {
-+ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
-+ int lun_status;
-+
-+ if (zsdev->port != port)
-+ continue;
-+ /* LUN under port of interest */
-+ lun_status = atomic_read(&zsdev->status);
-+ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
-+ continue; /* unblock rport despite failed LUNs */
-+ /* LUN recovery not given up yet [maybe follow-up pending] */
-+ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
-+ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
-+ /* LUN blocked:
-+ * not yet unblocked [LUN recovery pending]
-+ * or meanwhile blocked [new LUN recovery triggered]
-+ */
-+ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
-+ spin_unlock(shost->host_lock);
-+ write_unlock_irqrestore(&adapter->erp_lock, flags);
-+ return;
-+ }
-+ }
-+ /* now port has no child or all children have completed recovery,
-+ * and no ERP of severity >= port was meanwhile triggered elsewhere
-+ */
-+ zfcp_scsi_schedule_rport_register(port);
-+ spin_unlock(shost->host_lock);
-+ write_unlock_irqrestore(&adapter->erp_lock, flags);
-+}
-+
- static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
- {
- struct zfcp_adapter *adapter = act->adapter;
-@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
- case ZFCP_ERP_ACTION_REOPEN_LUN:
- if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
- scsi_device_put(sdev);
-+ zfcp_erp_try_rport_unblock(port);
- break;
-
- case ZFCP_ERP_ACTION_REOPEN_PORT:
-@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
- */
- if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
- if (result == ZFCP_ERP_SUCCEEDED)
-- zfcp_scsi_schedule_rport_register(port);
-+ zfcp_erp_try_rport_unblock(port);
- /* fall through */
- case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
- put_device(&port->dev);
-diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
-index c8fed9f..21c8c68 100644
---- a/drivers/s390/scsi/zfcp_ext.h
-+++ b/drivers/s390/scsi/zfcp_ext.h
-@@ -3,7 +3,7 @@
- *
- * External function declarations.
- *
-- * Copyright IBM Corp. 2002, 2015
-+ * Copyright IBM Corp. 2002, 2016
- */
-
- #ifndef ZFCP_EXT_H
-@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
- extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
- struct zfcp_port *, struct scsi_device *, u8, u8);
- extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
-+extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
-+ struct zfcp_erp_action *erp);
- extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
- extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
- extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
-diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
-index be1c04b..ea3c76a 100644
---- a/drivers/s390/scsi/zfcp_fsf.h
-+++ b/drivers/s390/scsi/zfcp_fsf.h
-@@ -3,7 +3,7 @@
- *
- * Interface to the FSF support functions.
- *
-- * Copyright IBM Corp. 2002, 2015
-+ * Copyright IBM Corp. 2002, 2016
- */
-
- #ifndef FSF_H
-@@ -78,6 +78,7 @@
- #define FSF_APP_TAG_CHECK_FAILURE 0x00000082
- #define FSF_REF_TAG_CHECK_FAILURE 0x00000083
- #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
-+#define FSF_FCP_RSP_AVAILABLE 0x000000AF
- #define FSF_UNKNOWN_COMMAND 0x000000E2
- #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3
- #define FSF_INVALID_COMMAND_OPTION 0x000000E5
-diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
-index 7c2c619..703fce5 100644
---- a/drivers/s390/scsi/zfcp_reqlist.h
-+++ b/drivers/s390/scsi/zfcp_reqlist.h
-@@ -4,7 +4,7 @@
- * Data structure and helper functions for tracking pending FSF
- * requests.
- *
-- * Copyright IBM Corp. 2009
-+ * Copyright IBM Corp. 2009, 2016
- */
-
- #ifndef ZFCP_REQLIST_H
-@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl,
- spin_unlock_irqrestore(&rl->lock, flags);
- }
-
-+/**
-+ * zfcp_reqlist_apply_for_all() - apply a function to every request.
-+ * @rl: the requestlist that contains the target requests.
-+ * @f: the function to apply to each request; the first parameter of the
-+ * function will be the target-request; the second parameter is the same
-+ * pointer as given with the argument @data.
-+ * @data: freely chosen argument; passed through to @f as second parameter.
-+ *
-+ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash-
-+ * table (not a 'safe' variant, so don't modify the list).
-+ *
-+ * Holds @rl->lock over the entire request-iteration.
-+ */
-+static inline void
-+zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl,
-+ void (*f)(struct zfcp_fsf_req *, void *), void *data)
-+{
-+ struct zfcp_fsf_req *req;
-+ unsigned long flags;
-+ unsigned int i;
-+
-+ spin_lock_irqsave(&rl->lock, flags);
-+ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)
-+ list_for_each_entry(req, &rl->buckets[i], list)
-+ f(req, data);
-+ spin_unlock_irqrestore(&rl->lock, flags);
-+}
-+
- #endif /* ZFCP_REQLIST_H */
-diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
-index 9069f98..07ffdbb 100644
---- a/drivers/s390/scsi/zfcp_scsi.c
-+++ b/drivers/s390/scsi/zfcp_scsi.c
-@@ -3,7 +3,7 @@
- *
- * Interface to Linux SCSI midlayer.
- *
-- * Copyright IBM Corp. 2002, 2015
-+ * Copyright IBM Corp. 2002, 2016
- */
-
- #define KMSG_COMPONENT "zfcp"
-@@ -88,9 +88,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt)
- }
-
- if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
-- /* This could be either
-- * open LUN pending: this is temporary, will result in
-- * open LUN or ERP_FAILED, so retry command
-+ /* This could be
- * call to rport_delete pending: mimic retry from
- * fc_remote_port_chkready until rport is BLOCKED
- */
-@@ -209,6 +207,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
- return retval;
- }
-
-+struct zfcp_scsi_req_filter {
-+ u8 tmf_scope;
-+ u32 lun_handle;
-+ u32 port_handle;
-+};
-+
-+static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data)
-+{
-+ struct zfcp_scsi_req_filter *filter =
-+ (struct zfcp_scsi_req_filter *)data;
-+
-+ /* already aborted - prevent side-effects - or not a SCSI command */
-+ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND)
-+ return;
-+
-+ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */
-+ if (old_req->qtcb->header.port_handle != filter->port_handle)
-+ return;
-+
-+ if (filter->tmf_scope == FCP_TMF_LUN_RESET &&
-+ old_req->qtcb->header.lun_handle != filter->lun_handle)
-+ return;
-+
-+ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req);
-+ old_req->data = NULL;
-+}
-+
-+static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
-+{
-+ struct zfcp_adapter *adapter = zsdev->port->adapter;
-+ struct zfcp_scsi_req_filter filter = {
-+ .tmf_scope = FCP_TMF_TGT_RESET,
-+ .port_handle = zsdev->port->handle,
-+ };
-+ unsigned long flags;
-+
-+ if (tm_flags == FCP_TMF_LUN_RESET) {
-+ filter.tmf_scope = FCP_TMF_LUN_RESET;
-+ filter.lun_handle = zsdev->lun_handle;
-+ }
-+
-+ /*
-+ * abort_lock secures against other processings - in the abort-function
-+ * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
-+ */
-+ write_lock_irqsave(&adapter->abort_lock, flags);
-+ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
-+ &filter);
-+ write_unlock_irqrestore(&adapter->abort_lock, flags);
-+}
-+
- static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
- {
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
-@@ -241,8 +290,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
- if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
- retval = FAILED;
-- } else
-+ } else {
- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
-+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
-+ }
-
- zfcp_fsf_req_free(fsf_req);
- return retval;
-diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
-index 79871f3..d5b26fa 100644
---- a/drivers/scsi/aacraid/linit.c
-+++ b/drivers/scsi/aacraid/linit.c
-@@ -160,7 +160,6 @@ static const struct pci_device_id aac_pci_tbl[] = {
- { 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
- { 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
- { 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
-- { 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
- { 0,}
- };
- MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
-@@ -239,7 +238,6 @@ static struct aac_driver_ident aac_drivers[] = {
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
-- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
- };
-
- /**
-diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
-index 52d8bbf..bd04bd0 100644
---- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
-+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
-@@ -2000,6 +2000,8 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
- io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- pRAID_Context->regLockFlags |=
- (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
-+ pRAID_Context->Type = MPI2_TYPE_CUDA;
-+ pRAID_Context->nseg = 0x1;
- } else if (fusion->fast_path_io) {
- pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
- pRAID_Context->configSeqNum = 0;
-@@ -2035,12 +2037,10 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
- pRAID_Context->timeoutValue =
- cpu_to_le16((os_timeout_value > timeout_limit) ?
- timeout_limit : os_timeout_value);
-- if (fusion->adapter_type == INVADER_SERIES) {
-- pRAID_Context->Type = MPI2_TYPE_CUDA;
-- pRAID_Context->nseg = 0x1;
-+ if (fusion->adapter_type == INVADER_SERIES)
- io_request->IoFlags |=
- cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-- }
-+
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
- MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-@@ -2823,6 +2823,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
- dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
- "will reset adapter scsi%d.\n",
- instance->host->host_no);
-+ *convert = 1;
- retval = 1;
- }
- out:
-diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
-index 0734927..82dfe07 100644
---- a/drivers/scsi/scsi_sysfs.c
-+++ b/drivers/scsi/scsi_sysfs.c
-@@ -1204,10 +1204,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
- struct request_queue *rq = sdev->request_queue;
- struct scsi_target *starget = sdev->sdev_target;
-
-- error = scsi_device_set_state(sdev, SDEV_RUNNING);
-- if (error)
-- return error;
--
- error = scsi_target_add(starget);
- if (error)
- return error;
-diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
-index ae7d9bd..a1c29b0 100644
---- a/drivers/scsi/sg.c
-+++ b/drivers/scsi/sg.c
-@@ -592,6 +592,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
- sg_io_hdr_t *hp;
- unsigned char cmnd[SG_MAX_CDB_SIZE];
-
-+ if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
-+ return -EINVAL;
-+
- if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
- return -ENXIO;
- SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
-diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
-index 0f28c08..77b551d 100644
---- a/drivers/ssb/pci.c
-+++ b/drivers/ssb/pci.c
-@@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
- if (err) {
- ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
- err);
-+ goto out_free;
- } else {
- ssb_dbg("Using SPROM revision %d provided by platform\n",
- sprom->revision);
-diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
-index 0f97d7b..1c967c3 100644
---- a/drivers/staging/comedi/drivers/ni_mio_common.c
-+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
-@@ -1832,7 +1832,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
- unsigned int *data)
- {
- struct ni_private *devpriv = dev->private;
-- unsigned int mask = (s->maxdata + 1) >> 1;
-+ unsigned int mask = s->maxdata;
- int i, n;
- unsigned int signbits;
- unsigned int d;
-@@ -1875,7 +1875,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
- return -ETIME;
- }
- d += signbits;
-- data[n] = d;
-+ data[n] = d & 0xffff;
- }
- } else if (devpriv->is_6143) {
- for (n = 0; n < insn->n; n++) {
-@@ -1924,9 +1924,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
- data[n] = dl;
- } else {
- d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
-- /* subtle: needs to be short addition */
- d += signbits;
-- data[n] = d;
-+ data[n] = d & 0xffff;
- }
- }
- }
-diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
-index 923c032..e980e2d 100644
---- a/drivers/target/iscsi/iscsi_target_configfs.c
-+++ b/drivers/target/iscsi/iscsi_target_configfs.c
-@@ -100,8 +100,10 @@ static ssize_t lio_target_np_driver_store(struct config_item *item,
-
- tpg_np_new = iscsit_tpg_add_network_portal(tpg,
- &np->np_sockaddr, tpg_np, type);
-- if (IS_ERR(tpg_np_new))
-+ if (IS_ERR(tpg_np_new)) {
-+ rc = PTR_ERR(tpg_np_new);
- goto out;
-+ }
- } else {
- tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
- if (tpg_np_new) {
-diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
-index 62bf4fe..b8a986c 100644
---- a/drivers/target/target_core_user.c
-+++ b/drivers/target/target_core_user.c
-@@ -682,8 +682,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
- target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
- cmd->se_cmd = NULL;
-
-- kmem_cache_free(tcmu_cmd_cache, cmd);
--
- return 0;
- }
-
-diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
-index c41c774..2dcd419 100644
---- a/drivers/thermal/thermal_hwmon.c
-+++ b/drivers/thermal/thermal_hwmon.c
-@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
- int temperature;
- int ret;
-
-- ret = tz->ops->get_trip_temp(tz, 0, &temperature);
-+ ret = tz->ops->get_crit_temp(tz, &temperature);
- if (ret)
- return ret;
-
-diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
-index f36e6df..e086ea4 100644
---- a/drivers/tty/serial/sc16is7xx.c
-+++ b/drivers/tty/serial/sc16is7xx.c
-@@ -1240,7 +1240,7 @@ static int sc16is7xx_probe(struct device *dev,
-
- /* Setup interrupt */
- ret = devm_request_irq(dev, irq, sc16is7xx_irq,
-- IRQF_ONESHOT | flags, dev_name(dev), s);
-+ flags, dev_name(dev), s);
- if (!ret)
- return 0;
-
-diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
-index 0f8caae..ece10e6 100644
---- a/drivers/tty/vt/keyboard.c
-+++ b/drivers/tty/vt/keyboard.c
-@@ -982,7 +982,7 @@ static void kbd_led_trigger_activate(struct led_classdev *cdev)
- KBD_LED_TRIGGER((_led_bit) + 8, _name)
-
- static struct kbd_led_trigger kbd_led_triggers[] = {
-- KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrollock"),
-+ KBD_LED_TRIGGER(VC_SCROLLOCK, "kbd-scrolllock"),
- KBD_LED_TRIGGER(VC_NUMLOCK, "kbd-numlock"),
- KBD_LED_TRIGGER(VC_CAPSLOCK, "kbd-capslock"),
- KBD_LED_TRIGGER(VC_KANALOCK, "kbd-kanalock"),
-diff --git a/fs/block_dev.c b/fs/block_dev.c
-index b010242..496d99b 100644
---- a/fs/block_dev.c
-+++ b/fs/block_dev.c
-@@ -1885,6 +1885,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
- spin_lock(&blockdev_superblock->s_inode_list_lock);
- list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
- struct address_space *mapping = inode->i_mapping;
-+ struct block_device *bdev;
-
- spin_lock(&inode->i_lock);
- if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
-@@ -1905,8 +1906,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
- */
- iput(old_inode);
- old_inode = inode;
-+ bdev = I_BDEV(inode);
-
-- func(I_BDEV(inode), arg);
-+ mutex_lock(&bdev->bd_mutex);
-+ if (bdev->bd_openers)
-+ func(bdev, arg);
-+ mutex_unlock(&bdev->bd_mutex);
-
- spin_lock(&blockdev_superblock->s_inode_list_lock);
- }
-diff --git a/fs/nfs/file.c b/fs/nfs/file.c
-index ca699dd..e6a0d22 100644
---- a/fs/nfs/file.c
-+++ b/fs/nfs/file.c
-@@ -397,7 +397,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
- */
- if (!PageUptodate(page)) {
- unsigned pglen = nfs_page_length(page);
-- unsigned end = offset + len;
-+ unsigned end = offset + copied;
-
- if (pglen == 0) {
- zero_user_segments(page, 0, offset,
-diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
-index 51b5136..dcc21f9 100644
---- a/fs/nfs/flexfilelayout/flexfilelayout.c
-+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
-@@ -28,6 +28,9 @@
-
- static struct group_info *ff_zero_group;
-
-+static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
-+ struct nfs_pgio_header *hdr);
-+
- static struct pnfs_layout_hdr *
- ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
- {
-@@ -1293,6 +1296,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
- hdr->pgio_mirror_idx + 1,
- &hdr->pgio_mirror_idx))
- goto out_eagain;
-+ ff_layout_read_record_layoutstats_done(task, hdr);
- pnfs_read_resend_pnfs(hdr);
- return task->tk_status;
- case -NFS4ERR_RESET_TO_MDS:
-diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
-index 2c93a85..eb8edb3 100644
---- a/fs/nfs/pnfs.c
-+++ b/fs/nfs/pnfs.c
-@@ -252,6 +252,14 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
- }
- }
-
-+static void
-+pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
-+{
-+ lo->plh_return_iomode = 0;
-+ lo->plh_return_seq = 0;
-+ clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-+}
-+
- /*
- * Mark a pnfs_layout_hdr and all associated layout segments as invalid
- *
-@@ -270,6 +278,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
- };
-
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
-+ pnfs_clear_layoutreturn_info(lo);
- return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
- }
-
-@@ -364,7 +373,9 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
- list_del_init(&lseg->pls_list);
- /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
- atomic_dec(&lo->plh_refcount);
-- if (list_empty(&lo->plh_segs)) {
-+ if (list_empty(&lo->plh_segs) &&
-+ !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
-+ !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
- if (atomic_read(&lo->plh_outstanding) == 0)
- set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
-@@ -769,14 +780,6 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
- pnfs_destroy_layouts_byclid(clp, false);
- }
-
--static void
--pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
--{
-- lo->plh_return_iomode = 0;
-- lo->plh_return_seq = 0;
-- clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
--}
--
- /* update lo->plh_stateid with new if is more recent */
- void
- pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
-@@ -897,6 +900,7 @@ static void pnfs_clear_layoutcommit(struct inode *inode,
- void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
- {
- clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
-+ clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
- smp_mb__after_atomic();
- wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
- rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
-@@ -910,8 +914,9 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
- /* Serialise LAYOUTGET/LAYOUTRETURN */
- if (atomic_read(&lo->plh_outstanding) != 0)
- return false;
-- if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-+ if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
- return false;
-+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
- pnfs_get_layout_hdr(lo);
- if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
- if (stateid != NULL) {
-@@ -1903,6 +1908,8 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
-
- spin_lock(&inode->i_lock);
- pnfs_set_plh_return_info(lo, range.iomode, 0);
-+ /* Block LAYOUTGET */
-+ set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
- /*
- * mark all matching lsegs so that we are sure to have no live
- * segments at hand when sending layoutreturn. See pnfs_put_lseg()
-@@ -2241,6 +2248,10 @@ void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
- struct nfs_pageio_descriptor pgio;
-
- if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
-+ /* Prevent deadlocks with layoutreturn! */
-+ pnfs_put_lseg(hdr->lseg);
-+ hdr->lseg = NULL;
-+
- nfs_pageio_init_read(&pgio, hdr->inode, false,
- hdr->completion_ops);
- hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
-diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
-index 31d99b2..98dbb51 100644
---- a/fs/nfs/pnfs.h
-+++ b/fs/nfs/pnfs.h
-@@ -96,6 +96,7 @@ enum {
- NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */
- NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
- NFS_LAYOUT_RETURN, /* layoutreturn in progress */
-+ NFS_LAYOUT_RETURN_LOCK, /* Serialise layoutreturn */
- NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */
- NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */
- NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
-diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
-index 741077d..a364524 100644
---- a/fs/notify/inode_mark.c
-+++ b/fs/notify/inode_mark.c
-@@ -150,12 +150,10 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
- */
- void fsnotify_unmount_inodes(struct super_block *sb)
- {
-- struct inode *inode, *next_i, *need_iput = NULL;
-+ struct inode *inode, *iput_inode = NULL;
-
- spin_lock(&sb->s_inode_list_lock);
-- list_for_each_entry_safe(inode, next_i, &sb->s_inodes, i_sb_list) {
-- struct inode *need_iput_tmp;
--
-+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
- /*
- * We cannot __iget() an inode in state I_FREEING,
- * I_WILL_FREE, or I_NEW which is fine because by that point
-@@ -178,49 +176,24 @@ void fsnotify_unmount_inodes(struct super_block *sb)
- continue;
- }
-
-- need_iput_tmp = need_iput;
-- need_iput = NULL;
--
-- /* In case fsnotify_inode_delete() drops a reference. */
-- if (inode != need_iput_tmp)
-- __iget(inode);
-- else
-- need_iput_tmp = NULL;
-+ __iget(inode);
- spin_unlock(&inode->i_lock);
--
-- /* In case the dropping of a reference would nuke next_i. */
-- while (&next_i->i_sb_list != &sb->s_inodes) {
-- spin_lock(&next_i->i_lock);
-- if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
-- atomic_read(&next_i->i_count)) {
-- __iget(next_i);
-- need_iput = next_i;
-- spin_unlock(&next_i->i_lock);
-- break;
-- }
-- spin_unlock(&next_i->i_lock);
-- next_i = list_next_entry(next_i, i_sb_list);
-- }
--
-- /*
-- * We can safely drop s_inode_list_lock here because either
-- * we actually hold references on both inode and next_i or
-- * end of list. Also no new inodes will be added since the
-- * umount has begun.
-- */
- spin_unlock(&sb->s_inode_list_lock);
-
-- if (need_iput_tmp)
-- iput(need_iput_tmp);
-+ if (iput_inode)
-+ iput(iput_inode);
-
- /* for each watch, send FS_UNMOUNT and then remove it */
- fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
-
- fsnotify_inode_delete(inode);
-
-- iput(inode);
-+ iput_inode = inode;
-
- spin_lock(&sb->s_inode_list_lock);
- }
- spin_unlock(&sb->s_inode_list_lock);
-+
-+ if (iput_inode)
-+ iput(iput_inode);
- }
-diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
-index beb7610..95b1b57b 100644
---- a/include/net/cfg80211.h
-+++ b/include/net/cfg80211.h
-@@ -4393,6 +4393,17 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
- void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss);
-
- /**
-+ * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt
-+ * @dev: network device
-+ * @bss: The BSS entry with which association was abandoned.
-+ *
-+ * Call this whenever - for reasons reported through other API, like deauth RX,
-+ * an association attempt was abandoned.
-+ * This function may sleep. The caller must hold the corresponding wdev's mutex.
-+ */
-+void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss);
-+
-+/**
- * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame
- * @dev: network device
- * @buf: 802.11 frame (header + body)
-diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
-index 931a47b..1beab55 100644
---- a/include/rdma/ib_addr.h
-+++ b/include/rdma/ib_addr.h
-@@ -205,10 +205,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
-
- dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
- if (dev) {
-- ip4 = (struct in_device *)dev->ip_ptr;
-- if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
-+ ip4 = in_dev_get(dev);
-+ if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
- ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
- (struct in6_addr *)gid);
-+ in_dev_put(ip4);
-+ }
- dev_put(dev);
- }
- }
-diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
-index 37dec7e..46e312e 100644
---- a/kernel/time/timekeeping.c
-+++ b/kernel/time/timekeeping.c
-@@ -299,10 +299,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
- static inline u32 arch_gettimeoffset(void) { return 0; }
- #endif
-
--static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
-+static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
- cycle_t delta)
- {
-- s64 nsec;
-+ u64 nsec;
-
- nsec = delta * tkr->mult + tkr->xtime_nsec;
- nsec >>= tkr->shift;
-diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
-index 7363ccf..16047a8 100644
---- a/kernel/trace/trace_functions_graph.c
-+++ b/kernel/trace/trace_functions_graph.c
-@@ -780,6 +780,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
-
- cpu_data = per_cpu_ptr(data->cpu_data, cpu);
-
-+ /* If a graph tracer ignored set_graph_notrace */
-+ if (call->depth < -1)
-+ call->depth += FTRACE_NOTRACE_DEPTH;
-+
- /*
- * Comments display at + 1 to depth. Since
- * this is a leaf function, keep the comments
-@@ -788,7 +792,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
- cpu_data->depth = call->depth - 1;
-
- /* No need to keep this function around for this depth */
-- if (call->depth < FTRACE_RETFUNC_DEPTH)
-+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
-+ !WARN_ON_ONCE(call->depth < 0))
- cpu_data->enter_funcs[call->depth] = 0;
- }
-
-@@ -818,11 +823,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
- struct fgraph_cpu_data *cpu_data;
- int cpu = iter->cpu;
-
-+ /* If a graph tracer ignored set_graph_notrace */
-+ if (call->depth < -1)
-+ call->depth += FTRACE_NOTRACE_DEPTH;
-+
- cpu_data = per_cpu_ptr(data->cpu_data, cpu);
- cpu_data->depth = call->depth;
-
- /* Save this function pointer to see if the exit matches */
-- if (call->depth < FTRACE_RETFUNC_DEPTH)
-+ if (call->depth < FTRACE_RETFUNC_DEPTH &&
-+ !WARN_ON_ONCE(call->depth < 0))
- cpu_data->enter_funcs[call->depth] = call->func;
- }
-
-@@ -1052,7 +1062,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
- */
- cpu_data->depth = trace->depth - 1;
-
-- if (trace->depth < FTRACE_RETFUNC_DEPTH) {
-+ if (trace->depth < FTRACE_RETFUNC_DEPTH &&
-+ !WARN_ON_ONCE(trace->depth < 0)) {
- if (cpu_data->enter_funcs[trace->depth] != trace->func)
- func_match = 0;
- cpu_data->enter_funcs[trace->depth] = 0;
-diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
-index a550289..2efb335 100644
---- a/net/ceph/messenger.c
-+++ b/net/ceph/messenger.c
-@@ -2027,6 +2027,19 @@ static int process_connect(struct ceph_connection *con)
-
- dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
-
-+ if (con->auth_reply_buf) {
-+ /*
-+ * Any connection that defines ->get_authorizer()
-+ * should also define ->verify_authorizer_reply().
-+ * See get_connect_authorizer().
-+ */
-+ ret = con->ops->verify_authorizer_reply(con, 0);
-+ if (ret < 0) {
-+ con->error_msg = "bad authorize reply";
-+ return ret;
-+ }
-+ }
-+
- switch (con->in_reply.tag) {
- case CEPH_MSGR_TAG_FEATURES:
- pr_err("%s%lld %s feature set mismatch,"
-diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
-index 8d426f6..b2e3c32 100644
---- a/net/mac80211/mlme.c
-+++ b/net/mac80211/mlme.c
-@@ -2506,7 +2506,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
- }
-
- static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
-- bool assoc)
-+ bool assoc, bool abandon)
- {
- struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
-
-@@ -2529,6 +2529,9 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
- mutex_lock(&sdata->local->mtx);
- ieee80211_vif_release_channel(sdata);
- mutex_unlock(&sdata->local->mtx);
-+
-+ if (abandon)
-+ cfg80211_abandon_assoc(sdata->dev, assoc_data->bss);
- }
-
- kfree(assoc_data);
-@@ -2758,7 +2761,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
- bssid, reason_code,
- ieee80211_get_reason_code_string(reason_code));
-
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, true);
-
- cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
- return;
-@@ -3163,14 +3166,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
- if (status_code != WLAN_STATUS_SUCCESS) {
- sdata_info(sdata, "%pM denied association (code=%d)\n",
- mgmt->sa, status_code);
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, false);
- event.u.mlme.status = MLME_DENIED;
- event.u.mlme.reason = status_code;
- drv_event_callback(sdata->local, sdata, &event);
- } else {
- if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) {
- /* oops -- internal error -- send timeout for now */
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, false);
- cfg80211_assoc_timeout(sdata->dev, bss);
- return;
- }
-@@ -3183,7 +3186,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
- * recalc after assoc_data is NULL but before associated
- * is set can cause the interface to go idle
- */
-- ieee80211_destroy_assoc_data(sdata, true);
-+ ieee80211_destroy_assoc_data(sdata, true, false);
-
- /* get uapsd queues configuration */
- uapsd_queues = 0;
-@@ -3882,7 +3885,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
- .u.mlme.status = MLME_TIMEOUT,
- };
-
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, false);
- cfg80211_assoc_timeout(sdata->dev, bss);
- drv_event_callback(sdata->local, sdata, &event);
- }
-@@ -4021,7 +4024,7 @@ void ieee80211_mgd_quiesce(struct ieee80211_sub_if_data *sdata)
- WLAN_REASON_DEAUTH_LEAVING,
- false, frame_buf);
- if (ifmgd->assoc_data)
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, true);
- if (ifmgd->auth_data)
- ieee80211_destroy_auth_data(sdata, false);
- cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
-@@ -4903,7 +4906,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
- IEEE80211_STYPE_DEAUTH,
- req->reason_code, tx,
- frame_buf);
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, true);
- ieee80211_report_disconnect(sdata, frame_buf,
- sizeof(frame_buf), true,
- req->reason_code);
-@@ -4978,7 +4981,7 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
- sdata_lock(sdata);
- if (ifmgd->assoc_data) {
- struct cfg80211_bss *bss = ifmgd->assoc_data->bss;
-- ieee80211_destroy_assoc_data(sdata, false);
-+ ieee80211_destroy_assoc_data(sdata, false, false);
- cfg80211_assoc_timeout(sdata->dev, bss);
- }
- if (ifmgd->auth_data)
-diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
-index 976c781..a0110c2 100644
---- a/net/sunrpc/auth_gss/auth_gss.c
-+++ b/net/sunrpc/auth_gss/auth_gss.c
-@@ -541,9 +541,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
- return gss_new;
- gss_msg = gss_add_msg(gss_new);
- if (gss_msg == gss_new) {
-- int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
-+ int res;
-+ atomic_inc(&gss_msg->count);
-+ res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
- if (res) {
- gss_unhash_msg(gss_new);
-+ atomic_dec(&gss_msg->count);
-+ gss_release_msg(gss_new);
- gss_msg = ERR_PTR(res);
- }
- } else
-@@ -836,6 +840,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
- warn_gssd();
- gss_release_msg(gss_msg);
- }
-+ gss_release_msg(gss_msg);
- }
-
- static void gss_pipe_dentry_destroy(struct dentry *dir,
-diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
-index a53b3a1..62c056e 100644
---- a/net/vmw_vsock/virtio_transport_common.c
-+++ b/net/vmw_vsock/virtio_transport_common.c
-@@ -606,9 +606,9 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
- return 0;
-
- pkt = virtio_transport_alloc_pkt(&info, 0,
-- le32_to_cpu(pkt->hdr.dst_cid),
-+ le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port),
-- le32_to_cpu(pkt->hdr.src_cid),
-+ le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
- if (!pkt)
- return -ENOMEM;
-@@ -823,7 +823,7 @@ virtio_transport_send_response(struct vsock_sock *vsk,
- struct virtio_vsock_pkt_info info = {
- .op = VIRTIO_VSOCK_OP_RESPONSE,
- .type = VIRTIO_VSOCK_TYPE_STREAM,
-- .remote_cid = le32_to_cpu(pkt->hdr.src_cid),
-+ .remote_cid = le64_to_cpu(pkt->hdr.src_cid),
- .remote_port = le32_to_cpu(pkt->hdr.src_port),
- .reply = true,
- };
-@@ -863,9 +863,9 @@ virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
- child->sk_state = SS_CONNECTED;
-
- vchild = vsock_sk(child);
-- vsock_addr_init(&vchild->local_addr, le32_to_cpu(pkt->hdr.dst_cid),
-+ vsock_addr_init(&vchild->local_addr, le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port));
-- vsock_addr_init(&vchild->remote_addr, le32_to_cpu(pkt->hdr.src_cid),
-+ vsock_addr_init(&vchild->remote_addr, le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
-
- vsock_insert_connected(vchild);
-@@ -904,9 +904,9 @@ void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt)
- struct sock *sk;
- bool space_available;
-
-- vsock_addr_init(&src, le32_to_cpu(pkt->hdr.src_cid),
-+ vsock_addr_init(&src, le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
-- vsock_addr_init(&dst, le32_to_cpu(pkt->hdr.dst_cid),
-+ vsock_addr_init(&dst, le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port));
-
- trace_virtio_transport_recv_pkt(src.svm_cid, src.svm_port,
-diff --git a/net/wireless/core.h b/net/wireless/core.h
-index 66f2a11..b5cf218 100644
---- a/net/wireless/core.h
-+++ b/net/wireless/core.h
-@@ -410,6 +410,7 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev);
- void cfg80211_sme_deauth(struct wireless_dev *wdev);
- void cfg80211_sme_auth_timeout(struct wireless_dev *wdev);
- void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev);
-+void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev);
-
- /* internal helpers */
- bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
-diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
-index c284d88..2a62ef6 100644
---- a/net/wireless/mlme.c
-+++ b/net/wireless/mlme.c
-@@ -149,6 +149,18 @@ void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss)
- }
- EXPORT_SYMBOL(cfg80211_assoc_timeout);
-
-+void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss)
-+{
-+ struct wireless_dev *wdev = dev->ieee80211_ptr;
-+ struct wiphy *wiphy = wdev->wiphy;
-+
-+ cfg80211_sme_abandon_assoc(wdev);
-+
-+ cfg80211_unhold_bss(bss_from_pub(bss));
-+ cfg80211_put_bss(wiphy, bss);
-+}
-+EXPORT_SYMBOL(cfg80211_abandon_assoc);
-+
- void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len)
- {
- struct wireless_dev *wdev = dev->ieee80211_ptr;
-diff --git a/net/wireless/sme.c b/net/wireless/sme.c
-index add6824..95c713c 100644
---- a/net/wireless/sme.c
-+++ b/net/wireless/sme.c
-@@ -39,6 +39,7 @@ struct cfg80211_conn {
- CFG80211_CONN_ASSOCIATING,
- CFG80211_CONN_ASSOC_FAILED,
- CFG80211_CONN_DEAUTH,
-+ CFG80211_CONN_ABANDON,
- CFG80211_CONN_CONNECTED,
- } state;
- u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
-@@ -206,6 +207,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
- cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
- NULL, 0,
- WLAN_REASON_DEAUTH_LEAVING, false);
-+ /* fall through */
-+ case CFG80211_CONN_ABANDON:
- /* free directly, disconnected event already sent */
- cfg80211_sme_free(wdev);
- return 0;
-@@ -423,6 +426,17 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
- schedule_work(&rdev->conn_work);
- }
-
-+void cfg80211_sme_abandon_assoc(struct wireless_dev *wdev)
-+{
-+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-+
-+ if (!wdev->conn)
-+ return;
-+
-+ wdev->conn->state = CFG80211_CONN_ABANDON;
-+ schedule_work(&rdev->conn_work);
-+}
-+
- static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev,
- const u8 *ies, size_t ies_len,
- const u8 **out_ies, size_t *out_ies_len)
-diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
-index 8275f0e5..4b2f44c 100644
---- a/scripts/kconfig/nconf.gui.c
-+++ b/scripts/kconfig/nconf.gui.c
-@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
- WINDOW *prompt_win;
- WINDOW *form_win;
- PANEL *panel;
-- int i, x, y;
-+ int i, x, y, lines, columns, win_lines, win_cols;
- int res = -1;
- int cursor_position = strlen(init);
- int cursor_form_win;
- char *result = *resultp;
-
-+ getmaxyx(stdscr, lines, columns);
-+
- if (strlen(init)+1 > *result_len) {
- *result_len = strlen(init)+1;
- *resultp = result = realloc(result, *result_len);
-@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
- if (title)
- prompt_width = max(prompt_width, strlen(title));
-
-+ win_lines = min(prompt_lines+6, lines-2);
-+ win_cols = min(prompt_width+7, columns-2);
-+ prompt_lines = max(win_lines-6, 0);
-+ prompt_width = max(win_cols-7, 0);
-+
- /* place dialog in middle of screen */
-- y = (getmaxy(stdscr)-(prompt_lines+4))/2;
-- x = (getmaxx(stdscr)-(prompt_width+4))/2;
-+ y = (lines-win_lines)/2;
-+ x = (columns-win_cols)/2;
-
- strncpy(result, init, *result_len);
-
- /* create the windows */
-- win = newwin(prompt_lines+6, prompt_width+7, y, x);
-+ win = newwin(win_lines, win_cols, y, x);
- prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
- form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
- keypad(form_win, TRUE);
diff --git a/4.8.17/0000_README b/4.9.9/0000_README
index 96fd06a..e2a9385 100644
--- a/4.8.17/0000_README
+++ b/4.9.9/0000_README
@@ -2,11 +2,15 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 1016_linux-4.8.17.patch
+Patch: 1007_linux-4.9.8.patch
From: http://www.kernel.org
-Desc: Linux 4.8.17
+Desc: Linux 4.9.8
-Patch: 4420_grsecurity-3.1-4.8.17-201701151620.patch
+Patch: 1008_linux-4.9.9.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.9
+
+Patch: 4420_grsecurity-3.1-4.9.9-201702122044.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.9.9/1007_linux-4.9.8.patch b/4.9.9/1007_linux-4.9.8.patch
new file mode 100644
index 0000000..a93aab4
--- /dev/null
+++ b/4.9.9/1007_linux-4.9.8.patch
@@ -0,0 +1,2048 @@
+diff --git a/Makefile b/Makefile
+index da704d9..1130803 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 25d1eb4..be7ec5a 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+- struct netdev_queue *txq;
+ u32 hw_ind;
+
+- txq = netdev_get_tx_queue(ndev, ring->index);
+-
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+
+ ring->c_index = c_index;
+
+- if (netif_tx_queue_stopped(txq) && pkts_compl)
+- netif_tx_wake_queue(txq);
+-
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+ {
++ struct netdev_queue *txq;
+ unsigned int released;
+ unsigned long flags;
+
++ txq = netdev_get_tx_queue(priv->netdev, ring->index);
++
+ spin_lock_irqsave(&ring->lock, flags);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
++ if (released)
++ netif_tx_wake_queue(txq);
++
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+ }
+
++/* Locked version of the per-ring TX reclaim, but does not wake the queue */
++static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
++ struct bcm_sysport_tx_ring *ring)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ring->lock, flags);
++ __bcm_sysport_tx_reclaim(priv, ring);
++ spin_unlock_irqrestore(&ring->lock, flags);
++}
++
+ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+ {
+ struct bcm_sysport_tx_ring *ring =
+@@ -1253,7 +1264,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+- bcm_sysport_tx_reclaim(priv, ring);
++ bcm_sysport_tx_clean(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index fb8bb02..d223e7c 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1740,8 +1740,11 @@ int mlx4_en_start_port(struct net_device *dev)
+ /* Process all completions if exist to prevent
+ * the queues freezing if they are full
+ */
+- for (i = 0; i < priv->rx_ring_num; i++)
++ for (i = 0; i < priv->rx_ring_num; i++) {
++ local_bh_disable();
+ napi_schedule(&priv->rx_cq[i]->napi);
++ local_bh_enable();
++ }
+
+ netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 33495d8..e7b2158 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+ return false;
+ }
+
++ if (unlikely(page_is_pfmemalloc(dma_info->page)))
++ return false;
++
+ cache->page_cache[cache->tail] = *dma_info;
+ cache->tail = tail_next;
+ return true;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
+index d942a3e..846fd4d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
+@@ -211,21 +211,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+ /* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
++MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
+
+ /* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
++MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
+
+ /* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
++MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
+
+ /* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
++MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
+
+ #endif
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index dda5761..f902c4d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
++ dev_consume_skb_any(skb_orig);
+ }
+
+ if (eth_skb_pad(skb)) {
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+index 92bda87..d548f0a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+@@ -314,6 +314,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+ dev_kfree_skb_any(skb_orig);
+ return NETDEV_TX_OK;
+ }
++ dev_consume_skb_any(skb_orig);
+ }
+ mlxsw_sx_txhdr_construct(skb, &tx_info);
+ /* TX header is consumed by HW on the way so we shouldn't count its
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index d6a2178..862f18e 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1508,6 +1508,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+ entry / NUM_TX_DESC * DPTR_ALIGN;
+ len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
++ /* Zero length DMA descriptors are problematic as they seem to
++ * terminate DMA transfers. Avoid them by simply using a length of
++ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
++ *
++ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
++ * data by the call to skb_put_padto() above this is safe with
++ * respect to both the length of the first DMA descriptor (len)
++ * overflowing the available data and the length of the second DMA
++ * descriptor (skb->len - len) being negative.
++ */
++ if (len == 0)
++ len = DPTR_ALIGN;
++
+ memcpy(buffer, skb->data, len);
+ dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index c9140c3..ff038e5 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ * policy filters on the host). Deliver these via the VF
+ * interface in the guest.
+ */
++ rcu_read_lock();
+ vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
+ if (vf_netdev && (vf_netdev->flags & IFF_UP))
+ net = vf_netdev;
+@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ if (unlikely(!skb)) {
+ ++net->stats.rx_dropped;
++ rcu_read_unlock();
+ return NVSP_STAT_FAIL;
+ }
+
+@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
+ * TODO - use NAPI?
+ */
+ netif_rx(skb);
++ rcu_read_unlock();
+
+ return 0;
+ }
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 7869b06..6f38daf 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -827,7 +827,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ return -EINVAL;
+
+ ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
+- macvtap_is_little_endian(q));
++ macvtap_is_little_endian(q), true);
+ if (ret)
+ BUG();
+
+diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
+index e741bf6..b0492ef 100644
+--- a/drivers/net/phy/bcm63xx.c
++++ b/drivers/net/phy/bcm63xx.c
+@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
+ MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+ MODULE_LICENSE("GPL");
+
++static int bcm63xx_config_intr(struct phy_device *phydev)
++{
++ int reg, err;
++
++ reg = phy_read(phydev, MII_BCM63XX_IR);
++ if (reg < 0)
++ return reg;
++
++ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
++ reg &= ~MII_BCM63XX_IR_GMASK;
++ else
++ reg |= MII_BCM63XX_IR_GMASK;
++
++ err = phy_write(phydev, MII_BCM63XX_IR, reg);
++ return err;
++}
++
+ static int bcm63xx_config_init(struct phy_device *phydev)
+ {
+ int reg, err;
+@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+- .config_intr = bcm_phy_config_intr,
++ .config_intr = bcm63xx_config_intr,
+ }, {
+ /* same phy as above, with just a different OUI */
+ .phy_id = 0x002bdc00,
+@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+- .config_intr = bcm_phy_config_intr,
++ .config_intr = bcm63xx_config_intr,
+ } };
+
+ module_phy_driver(bcm63xx_driver);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index db6acec..18402d7 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1374,7 +1374,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
+ return -EINVAL;
+
+ ret = virtio_net_hdr_from_skb(skb, &gso,
+- tun_is_little_endian(tun));
++ tun_is_little_endian(tun), true);
+ if (ret) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ pr_err("unexpected GSO type: "
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index dd623f6..b82be81 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define LENOVO_VENDOR_ID 0x17ef
+ #define NVIDIA_VENDOR_ID 0x0955
++#define HP_VENDOR_ID 0x03f0
+
+ static const struct usb_device_id products[] = {
+ /* BLACKLIST !!
+@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
+ .driver_info = 0,
+ },
+
++/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
++{
++ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
++ .driver_info = 0,
++},
++
+ /* AnyDATA ADU960S - handled by qmi_wwan */
+ {
+ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 6fe1cdb..24d5272 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
++ { /* HP lt2523 (Novatel E371) */
++ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
++ USB_CLASS_COMM,
++ USB_CDC_SUBCLASS_ETHERNET,
++ USB_CDC_PROTO_NONE),
++ .driver_info = (unsigned long)&qmi_wwan_info,
++ },
+ { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index 4b5cb16..90b426c 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -32,7 +32,7 @@
+ #define NETNEXT_VERSION "08"
+
+ /* Information for net */
+-#define NET_VERSION "6"
++#define NET_VERSION "7"
+
+ #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
+ #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
+@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
+ u8 checksum = CHECKSUM_NONE;
+ u32 opts2, opts3;
+
+- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
++ if (!(tp->netdev->features & NETIF_F_RXCSUM))
+ goto return_result;
+
+ opts2 = le32_to_cpu(rx_desc->opts2);
+@@ -3572,6 +3572,8 @@ static bool delay_autosuspend(struct r8152 *tp)
+ */
+ if (!sw_linking && tp->rtl_ops.in_nway(tp))
+ return true;
++ else if (!skb_queue_empty(&tp->tx_queue))
++ return true;
+ else
+ return false;
+ }
+@@ -4358,6 +4360,11 @@ static int rtl8152_probe(struct usb_interface *intf,
+ NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
++ if (tp->version == RTL_VER_01) {
++ netdev->features &= ~NETIF_F_RXCSUM;
++ netdev->hw_features &= ~NETIF_F_RXCSUM;
++ }
++
+ netdev->ethtool_ops = &ops;
+ netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index cbf1c61..51fc0c3 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -840,7 +840,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ hdr = skb_vnet_hdr(skb);
+
+ if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
+- virtio_is_little_endian(vi->vdev)))
++ virtio_is_little_endian(vi->vdev), false))
+ BUG();
+
+ if (vi->mergeable_rx_bufs)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 2ba01ca..0fafaa9 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2887,7 +2887,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+ memcpy(&vxlan->cfg, conf, sizeof(*conf));
+ if (!vxlan->cfg.dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+- vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
++ vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
+ else
+ vxlan->cfg.dst_port = default_port;
+ }
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 5050056..9f06a21 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -95,10 +95,7 @@ unsigned int
+ xfs_alloc_set_aside(
+ struct xfs_mount *mp)
+ {
+- unsigned int blocks;
+-
+- blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
+- return blocks;
++ return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
+ }
+
+ /*
+@@ -365,36 +362,12 @@ xfs_alloc_fix_len(
+ return;
+ ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
+ ASSERT(rlen % args->prod == args->mod);
++ ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
++ rlen + args->minleft);
+ args->len = rlen;
+ }
+
+ /*
+- * Fix up length if there is too little space left in the a.g.
+- * Return 1 if ok, 0 if too little, should give up.
+- */
+-STATIC int
+-xfs_alloc_fix_minleft(
+- xfs_alloc_arg_t *args) /* allocation argument structure */
+-{
+- xfs_agf_t *agf; /* a.g. freelist header */
+- int diff; /* free space difference */
+-
+- if (args->minleft == 0)
+- return 1;
+- agf = XFS_BUF_TO_AGF(args->agbp);
+- diff = be32_to_cpu(agf->agf_freeblks)
+- - args->len - args->minleft;
+- if (diff >= 0)
+- return 1;
+- args->len += diff; /* shrink the allocated space */
+- /* casts to (int) catch length underflows */
+- if ((int)args->len >= (int)args->minlen)
+- return 1;
+- args->agbno = NULLAGBLOCK;
+- return 0;
+-}
+-
+-/*
+ * Update the two btrees, logically removing from freespace the extent
+ * starting at rbno, rlen blocks. The extent is contained within the
+ * actual (current) free extent fbno for flen blocks.
+@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
+ xfs_alloc_arg_t *args) /* argument structure for allocation */
+ {
+ int error=0;
+- xfs_extlen_t reservation;
+- xfs_extlen_t oldmax;
+
+ ASSERT(args->minlen > 0);
+ ASSERT(args->maxlen > 0);
+@@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
+ ASSERT(args->alignment > 0);
+
+ /*
+- * Clamp maxlen to the amount of free space minus any reservations
+- * that have been made.
+- */
+- oldmax = args->maxlen;
+- reservation = xfs_ag_resv_needed(args->pag, args->resv);
+- if (args->maxlen > args->pag->pagf_freeblks - reservation)
+- args->maxlen = args->pag->pagf_freeblks - reservation;
+- if (args->maxlen == 0) {
+- args->agbno = NULLAGBLOCK;
+- args->maxlen = oldmax;
+- return 0;
+- }
+-
+- /*
+ * Branch to correct routine based on the type.
+ */
+ args->wasfromfl = 0;
+@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
+ /* NOTREACHED */
+ }
+
+- args->maxlen = oldmax;
+-
+ if (error || args->agbno == NULLAGBLOCK)
+ return error;
+
+@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
+ args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
+ - args->agbno;
+ xfs_alloc_fix_len(args);
+- if (!xfs_alloc_fix_minleft(args))
+- goto not_found;
+-
+ ASSERT(args->agbno + args->len <= tend);
+
+ /*
+@@ -1149,12 +1101,7 @@ xfs_alloc_ag_vextent_near(
+ XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
+ ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
+ args->len = blen;
+- if (!xfs_alloc_fix_minleft(args)) {
+- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+- trace_xfs_alloc_near_nominleft(args);
+- return 0;
+- }
+- blen = args->len;
++
+ /*
+ * We are allocating starting at bnew for blen blocks.
+ */
+@@ -1346,12 +1293,6 @@ xfs_alloc_ag_vextent_near(
+ */
+ args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
+ xfs_alloc_fix_len(args);
+- if (!xfs_alloc_fix_minleft(args)) {
+- trace_xfs_alloc_near_nominleft(args);
+- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
+- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+- return 0;
+- }
+ rlen = args->len;
+ (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
+ args->datatype, ltbnoa, ltlena, &ltnew);
+@@ -1553,8 +1494,6 @@ xfs_alloc_ag_vextent_size(
+ }
+ xfs_alloc_fix_len(args);
+
+- if (!xfs_alloc_fix_minleft(args))
+- goto out_nominleft;
+ rlen = args->len;
+ XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
+ /*
+@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
+ int flags)
+ {
+ struct xfs_perag *pag = args->pag;
+- xfs_extlen_t longest;
++ xfs_extlen_t alloc_len, longest;
+ xfs_extlen_t reservation; /* blocks that are still reserved */
+ int available;
+
+@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
+ reservation = xfs_ag_resv_needed(pag, args->resv);
+
+ /* do we have enough contiguous free space for the allocation? */
++ alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
+ longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
+ reservation);
+- if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
++ if (longest < alloc_len)
+ return false;
+
+ /* do we have enough free space remaining for the allocation? */
+ available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
+- reservation - min_free - args->total);
+- if (available < (int)args->minleft || available <= 0)
++ reservation - min_free - args->minleft);
++ if (available < (int)max(args->total, alloc_len))
+ return false;
+
++ /*
++ * Clamp maxlen to the amount of free space available for the actual
++ * extent allocation.
++ */
++ if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
++ args->maxlen = available;
++ ASSERT(args->maxlen > 0);
++ ASSERT(args->maxlen >= args->minlen);
++ }
++
+ return true;
+ }
+
+@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
+ }
+
+ need = xfs_alloc_min_freelist(mp, pag);
+- if (!xfs_alloc_space_available(args, need, flags))
++ if (!xfs_alloc_space_available(args, need, flags |
++ XFS_ALLOC_FLAG_CHECK))
+ goto out_agbp_relse;
+
+ /*
+@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
+ xfs_agblock_t agsize; /* allocation group size */
+ int error;
+ int flags; /* XFS_ALLOC_FLAG_... locking flags */
+- xfs_extlen_t minleft;/* minimum left value, temp copy */
+ xfs_mount_t *mp; /* mount structure pointer */
+ xfs_agnumber_t sagno; /* starting allocation group number */
+ xfs_alloctype_t type; /* input allocation type */
+ int bump_rotor = 0;
+- int no_min = 0;
+ xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
+
+ mp = args->mp;
+@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
+ trace_xfs_alloc_vextent_badargs(args);
+ return 0;
+ }
+- minleft = args->minleft;
+
+ switch (type) {
+ case XFS_ALLOCTYPE_THIS_AG:
+@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
+ */
+ args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
+ args->pag = xfs_perag_get(mp, args->agno);
+- args->minleft = 0;
+ error = xfs_alloc_fix_freelist(args, 0);
+- args->minleft = minleft;
+ if (error) {
+ trace_xfs_alloc_vextent_nofix(args);
+ goto error0;
+@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
+ */
+ for (;;) {
+ args->pag = xfs_perag_get(mp, args->agno);
+- if (no_min) args->minleft = 0;
+ error = xfs_alloc_fix_freelist(args, flags);
+- args->minleft = minleft;
+ if (error) {
+ trace_xfs_alloc_vextent_nofix(args);
+ goto error0;
+@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
+ * or switch to non-trylock mode.
+ */
+ if (args->agno == sagno) {
+- if (no_min == 1) {
++ if (flags == 0) {
+ args->agbno = NULLAGBLOCK;
+ trace_xfs_alloc_vextent_allfailed(args);
+ break;
+ }
+- if (flags == 0) {
+- no_min = 1;
+- } else {
+- flags = 0;
+- if (type == XFS_ALLOCTYPE_START_BNO) {
+- args->agbno = XFS_FSB_TO_AGBNO(mp,
+- args->fsbno);
+- args->type = XFS_ALLOCTYPE_NEAR_BNO;
+- }
++
++ flags = 0;
++ if (type == XFS_ALLOCTYPE_START_BNO) {
++ args->agbno = XFS_FSB_TO_AGBNO(mp,
++ args->fsbno);
++ args->type = XFS_ALLOCTYPE_NEAR_BNO;
+ }
+ }
+ xfs_perag_put(args->pag);
+diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
+index 7c404a6..1d0f48a 100644
+--- a/fs/xfs/libxfs/xfs_alloc.h
++++ b/fs/xfs/libxfs/xfs_alloc.h
+@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
+ #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
+ #define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
+ #define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
+-
++#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
+
+ /*
+ * Argument structure for xfs_alloc routines.
+diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
+index af1ecb1..6622d46 100644
+--- a/fs/xfs/libxfs/xfs_attr.c
++++ b/fs/xfs/libxfs/xfs_attr.c
+@@ -131,9 +131,6 @@ xfs_attr_get(
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return -EIO;
+
+- if (!xfs_inode_hasattr(ip))
+- return -ENOATTR;
+-
+ error = xfs_attr_args_init(&args, ip, name, flags);
+ if (error)
+ return error;
+@@ -392,9 +389,6 @@ xfs_attr_remove(
+ if (XFS_FORCED_SHUTDOWN(dp->i_mount))
+ return -EIO;
+
+- if (!xfs_inode_hasattr(dp))
+- return -ENOATTR;
+-
+ error = xfs_attr_args_init(&args, dp, name, flags);
+ if (error)
+ return error;
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 89d727b..f52fd63 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -3720,7 +3720,7 @@ xfs_bmap_btalloc(
+ align = xfs_get_cowextsz_hint(ap->ip);
+ else if (xfs_alloc_is_userdata(ap->datatype))
+ align = xfs_get_extsz_hint(ap->ip);
+- if (unlikely(align)) {
++ if (align) {
+ error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
+ align, 0, ap->eof, 0, ap->conv,
+ &ap->offset, &ap->length);
+@@ -3792,7 +3792,7 @@ xfs_bmap_btalloc(
+ args.minlen = ap->minlen;
+ }
+ /* apply extent size hints if obtained earlier */
+- if (unlikely(align)) {
++ if (align) {
+ args.prod = align;
+ if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
+ args.mod = (xfs_extlen_t)(args.prod - args.mod);
+@@ -3903,7 +3903,6 @@ xfs_bmap_btalloc(
+ args.fsbno = 0;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+ args.total = ap->minlen;
+- args.minleft = 0;
+ if ((error = xfs_alloc_vextent(&args)))
+ return error;
+ ap->dfops->dop_low = true;
+@@ -4437,8 +4436,6 @@ xfs_bmapi_allocate(
+ if (error)
+ return error;
+
+- if (bma->dfops->dop_low)
+- bma->minleft = 0;
+ if (bma->cur)
+ bma->cur->bc_private.b.firstblock = *bma->firstblock;
+ if (bma->blkno == NULLFSBLOCK)
+@@ -4610,8 +4607,6 @@ xfs_bmapi_write(
+ int n; /* current extent index */
+ xfs_fileoff_t obno; /* old block number (offset) */
+ int whichfork; /* data or attr fork */
+- char inhole; /* current location is hole in file */
+- char wasdelay; /* old extent was delayed */
+
+ #ifdef DEBUG
+ xfs_fileoff_t orig_bno; /* original block number value */
+@@ -4697,22 +4692,44 @@ xfs_bmapi_write(
+ bma.firstblock = firstblock;
+
+ while (bno < end && n < *nmap) {
+- inhole = eof || bma.got.br_startoff > bno;
+- wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
++ bool need_alloc = false, wasdelay = false;
+
+- /*
+- * Make sure we only reflink into a hole.
+- */
+- if (flags & XFS_BMAPI_REMAP)
+- ASSERT(inhole);
+- if (flags & XFS_BMAPI_COWFORK)
+- ASSERT(!inhole);
++ /* in hole or beyoned EOF? */
++ if (eof || bma.got.br_startoff > bno) {
++ if (flags & XFS_BMAPI_DELALLOC) {
++ /*
++ * For the COW fork we can reasonably get a
++ * request for converting an extent that races
++ * with other threads already having converted
++ * part of it, as there converting COW to
++ * regular blocks is not protected using the
++ * IOLOCK.
++ */
++ ASSERT(flags & XFS_BMAPI_COWFORK);
++ if (!(flags & XFS_BMAPI_COWFORK)) {
++ error = -EIO;
++ goto error0;
++ }
++
++ if (eof || bno >= end)
++ break;
++ } else {
++ need_alloc = true;
++ }
++ } else {
++ /*
++ * Make sure we only reflink into a hole.
++ */
++ ASSERT(!(flags & XFS_BMAPI_REMAP));
++ if (isnullstartblock(bma.got.br_startblock))
++ wasdelay = true;
++ }
+
+ /*
+ * First, deal with the hole before the allocated space
+ * that we found, if any.
+ */
+- if (inhole || wasdelay) {
++ if (need_alloc || wasdelay) {
+ bma.eof = eof;
+ bma.conv = !!(flags & XFS_BMAPI_CONVERT);
+ bma.wasdel = wasdelay;
+diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
+index d6d175a..e7d40b3 100644
+--- a/fs/xfs/libxfs/xfs_bmap.h
++++ b/fs/xfs/libxfs/xfs_bmap.h
+@@ -110,6 +110,9 @@ struct xfs_extent_free_item
+ /* Map something in the CoW fork. */
+ #define XFS_BMAPI_COWFORK 0x200
+
++/* Only convert delalloc space, don't allocate entirely new extents */
++#define XFS_BMAPI_DELALLOC 0x400
++
+ #define XFS_BMAPI_FLAGS \
+ { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
+ { XFS_BMAPI_METADATA, "METADATA" }, \
+@@ -120,7 +123,8 @@ struct xfs_extent_free_item
+ { XFS_BMAPI_CONVERT, "CONVERT" }, \
+ { XFS_BMAPI_ZERO, "ZERO" }, \
+ { XFS_BMAPI_REMAP, "REMAP" }, \
+- { XFS_BMAPI_COWFORK, "COWFORK" }
++ { XFS_BMAPI_COWFORK, "COWFORK" }, \
++ { XFS_BMAPI_DELALLOC, "DELALLOC" }
+
+
+ static inline int xfs_bmapi_aflag(int w)
+diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
+index 049fa59..f76c169 100644
+--- a/fs/xfs/libxfs/xfs_bmap_btree.c
++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
+@@ -502,12 +502,11 @@ xfs_bmbt_alloc_block(
+ if (args.fsbno == NULLFSBLOCK && args.minleft) {
+ /*
+ * Could not find an AG with enough free space to satisfy
+- * a full btree split. Try again without minleft and if
++ * a full btree split. Try again and if
+ * successful activate the lowspace algorithm.
+ */
+ args.fsbno = 0;
+ args.type = XFS_ALLOCTYPE_FIRST_AG;
+- args.minleft = 0;
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto error0;
+diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
+index 20a96dd..7825d78 100644
+--- a/fs/xfs/libxfs/xfs_dir2.c
++++ b/fs/xfs/libxfs/xfs_dir2.c
+@@ -36,21 +36,29 @@
+ struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
+
+ /*
+- * @mode, if set, indicates that the type field needs to be set up.
+- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
+- * for file type specification. This will be propagated into the directory
+- * structure if appropriate for the given operation and filesystem config.
++ * Convert inode mode to directory entry filetype
+ */
+-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
+- [0] = XFS_DIR3_FT_UNKNOWN,
+- [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
+- [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
+- [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
+- [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
+- [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
+- [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
+- [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
+-};
++unsigned char xfs_mode_to_ftype(int mode)
++{
++ switch (mode & S_IFMT) {
++ case S_IFREG:
++ return XFS_DIR3_FT_REG_FILE;
++ case S_IFDIR:
++ return XFS_DIR3_FT_DIR;
++ case S_IFCHR:
++ return XFS_DIR3_FT_CHRDEV;
++ case S_IFBLK:
++ return XFS_DIR3_FT_BLKDEV;
++ case S_IFIFO:
++ return XFS_DIR3_FT_FIFO;
++ case S_IFSOCK:
++ return XFS_DIR3_FT_SOCK;
++ case S_IFLNK:
++ return XFS_DIR3_FT_SYMLINK;
++ default:
++ return XFS_DIR3_FT_UNKNOWN;
++ }
++}
+
+ /*
+ * ASCII case-insensitive (ie. A-Z) support for directories that was
+@@ -631,7 +639,8 @@ xfs_dir2_isblock(
+ if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
+ return rval;
+ rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
+- ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
++ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
++ return -EFSCORRUPTED;
+ *vp = rval;
+ return 0;
+ }
+diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
+index becc926..ae0d55b 100644
+--- a/fs/xfs/libxfs/xfs_dir2.h
++++ b/fs/xfs/libxfs/xfs_dir2.h
+@@ -18,6 +18,9 @@
+ #ifndef __XFS_DIR2_H__
+ #define __XFS_DIR2_H__
+
++#include "xfs_da_format.h"
++#include "xfs_da_btree.h"
++
+ struct xfs_defer_ops;
+ struct xfs_da_args;
+ struct xfs_inode;
+@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
+ extern struct xfs_name xfs_name_dotdot;
+
+ /*
+- * directory filetype conversion tables.
++ * Convert inode mode to directory entry filetype
+ */
+-#define S_SHIFT 12
+-extern const unsigned char xfs_mode_to_ftype[];
++extern unsigned char xfs_mode_to_ftype(int mode);
+
+ /*
+ * directory operations vector for encode/decode routines
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index c906e50..37ee7f0 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -29,6 +29,7 @@
+ #include "xfs_icache.h"
+ #include "xfs_trans.h"
+ #include "xfs_ialloc.h"
++#include "xfs_dir2.h"
+
+ /*
+ * Check that none of the inode's in the buffer have a next
+@@ -386,6 +387,7 @@ xfs_dinode_verify(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+ {
++ uint16_t mode;
+ uint16_t flags;
+ uint64_t flags2;
+
+@@ -396,8 +398,12 @@ xfs_dinode_verify(
+ if (be64_to_cpu(dip->di_size) & (1ULL << 63))
+ return false;
+
+- /* No zero-length symlinks. */
+- if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
++ mode = be16_to_cpu(dip->di_mode);
++ if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
++ return false;
++
++ /* No zero-length symlinks/dirs. */
++ if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
+ return false;
+
+ /* only version 3 or greater inodes are extensively verified here */
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 2580262..584ec89 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
+ sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
+ sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
+- sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
++ sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
+ sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
+ sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
+diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
+index 0670a8b..efb8ccd 100644
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -528,7 +528,6 @@ xfs_getbmap(
+ xfs_bmbt_irec_t *map; /* buffer for user's data */
+ xfs_mount_t *mp; /* file system mount point */
+ int nex; /* # of user extents can do */
+- int nexleft; /* # of user extents left */
+ int subnex; /* # of bmapi's can do */
+ int nmap; /* number of map entries */
+ struct getbmapx *out; /* output structure */
+@@ -686,10 +685,8 @@ xfs_getbmap(
+ goto out_free_map;
+ }
+
+- nexleft = nex;
+-
+ do {
+- nmap = (nexleft > subnex) ? subnex : nexleft;
++ nmap = (nex> subnex) ? subnex : nex;
+ error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
+ XFS_BB_TO_FSB(mp, bmv->bmv_length),
+ map, &nmap, bmapi_flags);
+@@ -697,8 +694,8 @@ xfs_getbmap(
+ goto out_free_map;
+ ASSERT(nmap <= subnex);
+
+- for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
+- cur_ext < bmv->bmv_count; i++) {
++ for (i = 0; i < nmap && bmv->bmv_length &&
++ cur_ext < bmv->bmv_count - 1; i++) {
+ out[cur_ext].bmv_oflags = 0;
+ if (map[i].br_state == XFS_EXT_UNWRITTEN)
+ out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
+@@ -760,16 +757,27 @@ xfs_getbmap(
+ continue;
+ }
+
++ /*
++ * In order to report shared extents accurately,
++ * we report each distinct shared/unshared part
++ * of a single bmbt record using multiple bmap
++ * extents. To make that happen, we iterate the
++ * same map array item multiple times, each
++ * time trimming out the subextent that we just
++ * reported.
++ *
++ * Because of this, we must check the out array
++ * index (cur_ext) directly against bmv_count-1
++ * to avoid overflows.
++ */
+ if (inject_map.br_startblock != NULLFSBLOCK) {
+ map[i] = inject_map;
+ i--;
+- } else
+- nexleft--;
++ }
+ bmv->bmv_entries++;
+ cur_ext++;
+ }
+- } while (nmap && nexleft && bmv->bmv_length &&
+- cur_ext < bmv->bmv_count);
++ } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
+
+ out_free_map:
+ kmem_free(map);
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index b5b9bff..d7a67d7 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -423,6 +423,7 @@ xfs_buf_allocate_memory(
+ out_free_pages:
+ for (i = 0; i < bp->b_page_count; i++)
+ __free_page(bp->b_pages[i]);
++ bp->b_flags &= ~_XBF_PAGES;
+ return error;
+ }
+
+diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
+index 7a30b8f..9d06cc3 100644
+--- a/fs/xfs/xfs_dquot.c
++++ b/fs/xfs/xfs_dquot.c
+@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
+ /* Simple advance */
+ next_id = *id + 1;
+
++ /* If we'd wrap past the max ID, stop */
++ if (next_id < *id)
++ return -ENOENT;
++
+ /* If new ID is within the current chunk, advancing it sufficed */
+ if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+ *id = next_id;
+diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
+index 15a83813..cdc6bdd 100644
+--- a/fs/xfs/xfs_iomap.c
++++ b/fs/xfs/xfs_iomap.c
+@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
+ xfs_trans_t *tp;
+ int nimaps;
+ int error = 0;
+- int flags = 0;
++ int flags = XFS_BMAPI_DELALLOC;
+ int nres;
+
+ if (whichfork == XFS_COW_FORK)
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index 405a65c..f5e0f60 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -98,12 +98,27 @@ xfs_init_security(
+ static void
+ xfs_dentry_to_name(
+ struct xfs_name *namep,
++ struct dentry *dentry)
++{
++ namep->name = dentry->d_name.name;
++ namep->len = dentry->d_name.len;
++ namep->type = XFS_DIR3_FT_UNKNOWN;
++}
++
++static int
++xfs_dentry_mode_to_name(
++ struct xfs_name *namep,
+ struct dentry *dentry,
+ int mode)
+ {
+ namep->name = dentry->d_name.name;
+ namep->len = dentry->d_name.len;
+- namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
++ namep->type = xfs_mode_to_ftype(mode);
++
++ if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
++ return -EFSCORRUPTED;
++
++ return 0;
+ }
+
+ STATIC void
+@@ -119,7 +134,7 @@ xfs_cleanup_inode(
+ * xfs_init_security we must back out.
+ * ENOSPC can hit here, among other things.
+ */
+- xfs_dentry_to_name(&teardown, dentry, 0);
++ xfs_dentry_to_name(&teardown, dentry);
+
+ xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
+ }
+@@ -154,8 +169,12 @@ xfs_generic_create(
+ if (error)
+ return error;
+
++ /* Verify mode is valid also for tmpfile case */
++ error = xfs_dentry_mode_to_name(&name, dentry, mode);
++ if (unlikely(error))
++ goto out_free_acl;
++
+ if (!tmpfile) {
+- xfs_dentry_to_name(&name, dentry, mode);
+ error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
+ } else {
+ error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
+@@ -248,7 +267,7 @@ xfs_vn_lookup(
+ if (dentry->d_name.len >= MAXNAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- xfs_dentry_to_name(&name, dentry, 0);
++ xfs_dentry_to_name(&name, dentry);
+ error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
+ if (unlikely(error)) {
+ if (unlikely(error != -ENOENT))
+@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
+ if (dentry->d_name.len >= MAXNAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+- xfs_dentry_to_name(&xname, dentry, 0);
++ xfs_dentry_to_name(&xname, dentry);
+ error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
+ if (unlikely(error)) {
+ if (unlikely(error != -ENOENT))
+@@ -310,7 +329,9 @@ xfs_vn_link(
+ struct xfs_name name;
+ int error;
+
+- xfs_dentry_to_name(&name, dentry, inode->i_mode);
++ error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
++ if (unlikely(error))
++ return error;
+
+ error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
+ if (unlikely(error))
+@@ -329,7 +350,7 @@ xfs_vn_unlink(
+ struct xfs_name name;
+ int error;
+
+- xfs_dentry_to_name(&name, dentry, 0);
++ xfs_dentry_to_name(&name, dentry);
+
+ error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
+ if (error)
+@@ -359,7 +380,9 @@ xfs_vn_symlink(
+
+ mode = S_IFLNK |
+ (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
+- xfs_dentry_to_name(&name, dentry, mode);
++ error = xfs_dentry_mode_to_name(&name, dentry, mode);
++ if (unlikely(error))
++ goto out;
+
+ error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
+ if (unlikely(error))
+@@ -395,6 +418,7 @@ xfs_vn_rename(
+ {
+ struct inode *new_inode = d_inode(ndentry);
+ int omode = 0;
++ int error;
+ struct xfs_name oname;
+ struct xfs_name nname;
+
+@@ -405,8 +429,14 @@ xfs_vn_rename(
+ if (flags & RENAME_EXCHANGE)
+ omode = d_inode(ndentry)->i_mode;
+
+- xfs_dentry_to_name(&oname, odentry, omode);
+- xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
++ error = xfs_dentry_mode_to_name(&oname, odentry, omode);
++ if (omode && unlikely(error))
++ return error;
++
++ error = xfs_dentry_mode_to_name(&nname, ndentry,
++ d_inode(odentry)->i_mode);
++ if (unlikely(error))
++ return error;
+
+ return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
+ XFS_I(ndir), &nname,
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 68640fb..1455b2520 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -330,11 +330,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ }
+
+ #define ASSERT_ALWAYS(expr) \
+- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+ #ifdef DEBUG
+ #define ASSERT(expr) \
+- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+
+ #ifndef STATIC
+ # define STATIC noinline
+@@ -345,7 +345,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ #ifdef XFS_WARN
+
+ #define ASSERT(expr) \
+- (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
++ (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+
+ #ifndef STATIC
+ # define STATIC static noinline
+diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
+index 3b74fa0..4017aa9 100644
+--- a/fs/xfs/xfs_log.c
++++ b/fs/xfs/xfs_log.c
+@@ -3324,12 +3324,8 @@ xfs_log_force(
+ xfs_mount_t *mp,
+ uint flags)
+ {
+- int error;
+-
+ trace_xfs_log_force(mp, 0, _RET_IP_);
+- error = _xfs_log_force(mp, flags, NULL);
+- if (error)
+- xfs_warn(mp, "%s: error %d returned.", __func__, error);
++ _xfs_log_force(mp, flags, NULL);
+ }
+
+ /*
+@@ -3473,12 +3469,8 @@ xfs_log_force_lsn(
+ xfs_lsn_t lsn,
+ uint flags)
+ {
+- int error;
+-
+ trace_xfs_log_force(mp, lsn, _RET_IP_);
+- error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+- if (error)
+- xfs_warn(mp, "%s: error %d returned.", __func__, error);
++ _xfs_log_force_lsn(mp, lsn, flags, NULL);
+ }
+
+ /*
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index a17ae7b..647532b 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
+
+ /* TCP Fast Open Cookie as stored in memory */
+ struct tcp_fastopen_cookie {
++ union {
++ u8 val[TCP_FASTOPEN_COOKIE_MAX];
++#if IS_ENABLED(CONFIG_IPV6)
++ struct in6_addr addr;
++#endif
++ };
+ s8 len;
+- u8 val[TCP_FASTOPEN_COOKIE_MAX];
+ bool exp; /* In RFC6994 experimental option format */
+ };
+
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 1c912f8..f211c34 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+
+ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+- bool little_endian)
++ bool little_endian,
++ bool has_data_valid)
+ {
+ memset(hdr, 0, sizeof(*hdr));
+
+@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
+ skb_checksum_start_offset(skb));
+ hdr->csum_offset = __cpu_to_virtio16(little_endian,
+ skb->csum_offset);
+- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
++ } else if (has_data_valid &&
++ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+ } /* else everything is zero */
+
+diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
+index ea3f80f..fc7c0db 100644
+--- a/include/net/lwtunnel.h
++++ b/include/net/lwtunnel.h
+@@ -43,13 +43,12 @@ struct lwtunnel_encap_ops {
+ int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+ int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+ int (*xmit)(struct sk_buff *skb);
++
++ struct module *owner;
+ };
+
+ #ifdef CONFIG_LWTUNNEL
+-static inline void lwtstate_free(struct lwtunnel_state *lws)
+-{
+- kfree(lws);
+-}
++void lwtstate_free(struct lwtunnel_state *lws);
+
+ static inline struct lwtunnel_state *
+ lwtstate_get(struct lwtunnel_state *lws)
+@@ -106,6 +105,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
+ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ unsigned int num);
++int lwtunnel_valid_encap_type(u16 encap_type);
++int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
+ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+@@ -169,6 +170,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+ return -EOPNOTSUPP;
+ }
+
++static inline int lwtunnel_valid_encap_type(u16 encap_type)
++{
++ return -EOPNOTSUPP;
++}
++static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
++{
++ return -EOPNOTSUPP;
++}
++
+ static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ struct nlattr *encap,
+ unsigned int family, const void *cfg,
+diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
+index 655a7d4..983f0b5 100644
+--- a/net/ax25/ax25_subr.c
++++ b/net/ax25/ax25_subr.c
+@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
+ {
+ ax25_clear_queues(ax25);
+
+- if (!sock_flag(ax25->sk, SOCK_DESTROY))
++ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
+ ax25_stop_heartbeat(ax25);
+ ax25_stop_t1timer(ax25);
+ ax25_stop_t2timer(ax25);
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index e99037c..0474106 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
+ return 0;
+ }
+
+-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+- struct nlattr *tb[], struct nlattr *data[])
+-{
+- struct net_bridge *br = netdev_priv(dev);
+-
+- if (tb[IFLA_ADDRESS]) {
+- spin_lock_bh(&br->lock);
+- br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+- spin_unlock_bh(&br->lock);
+- }
+-
+- return register_netdevice(dev);
+-}
+-
+ static int br_port_slave_changelink(struct net_device *brdev,
+ struct net_device *dev,
+ struct nlattr *tb[],
+@@ -1093,6 +1079,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
+ return 0;
+ }
+
++static int br_dev_newlink(struct net *src_net, struct net_device *dev,
++ struct nlattr *tb[], struct nlattr *data[])
++{
++ struct net_bridge *br = netdev_priv(dev);
++ int err;
++
++ if (tb[IFLA_ADDRESS]) {
++ spin_lock_bh(&br->lock);
++ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
++ spin_unlock_bh(&br->lock);
++ }
++
++ err = br_changelink(dev, tb, data);
++ if (err)
++ return err;
++
++ return register_netdevice(dev);
++}
++
+ static size_t br_get_size(const struct net_device *brdev)
+ {
+ return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
+diff --git a/net/core/dev.c b/net/core/dev.c
+index e1d731f..df51c50 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2815,9 +2815,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
+ if (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, type)) {
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+- } else if (illegal_highdma(skb->dev, skb)) {
+- features &= ~NETIF_F_SG;
+ }
++ if (illegal_highdma(skb->dev, skb))
++ features &= ~NETIF_F_SG;
+
+ return features;
+ }
+diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
+index e5f84c2..afa64f0 100644
+--- a/net/core/lwtunnel.c
++++ b/net/core/lwtunnel.c
+@@ -26,6 +26,7 @@
+ #include <net/lwtunnel.h>
+ #include <net/rtnetlink.h>
+ #include <net/ip6_fib.h>
++#include <net/nexthop.h>
+
+ #ifdef CONFIG_MODULES
+
+@@ -65,6 +66,15 @@ EXPORT_SYMBOL(lwtunnel_state_alloc);
+ static const struct lwtunnel_encap_ops __rcu *
+ lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
++void lwtstate_free(struct lwtunnel_state *lws)
++{
++ const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];
++
++ kfree(lws);
++ module_put(ops->owner);
++}
++EXPORT_SYMBOL(lwtstate_free);
++
+ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+ unsigned int num)
+ {
+@@ -110,25 +120,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+ ret = -EOPNOTSUPP;
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
++ ret = ops->build_state(dev, encap, family, cfg, lws);
++ if (ret)
++ module_put(ops->owner);
++ }
++ rcu_read_unlock();
++
++ return ret;
++}
++EXPORT_SYMBOL(lwtunnel_build_state);
++
++int lwtunnel_valid_encap_type(u16 encap_type)
++{
++ const struct lwtunnel_encap_ops *ops;
++ int ret = -EINVAL;
++
++ if (encap_type == LWTUNNEL_ENCAP_NONE ||
++ encap_type > LWTUNNEL_ENCAP_MAX)
++ return ret;
++
++ rcu_read_lock();
++ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ rcu_read_unlock();
+ #ifdef CONFIG_MODULES
+ if (!ops) {
+ const char *encap_type_str = lwtunnel_encap_str(encap_type);
+
+ if (encap_type_str) {
+- rcu_read_unlock();
++ __rtnl_unlock();
+ request_module("rtnl-lwt-%s", encap_type_str);
++ rtnl_lock();
++
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
++ rcu_read_unlock();
+ }
+ }
+ #endif
+- if (likely(ops && ops->build_state))
+- ret = ops->build_state(dev, encap, family, cfg, lws);
+- rcu_read_unlock();
++ return ops ? 0 : -EOPNOTSUPP;
++}
++EXPORT_SYMBOL(lwtunnel_valid_encap_type);
+
+- return ret;
++int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
++{
++ struct rtnexthop *rtnh = (struct rtnexthop *)attr;
++ struct nlattr *nla_entype;
++ struct nlattr *attrs;
++ struct nlattr *nla;
++ u16 encap_type;
++ int attrlen;
++
++ while (rtnh_ok(rtnh, remaining)) {
++ attrlen = rtnh_attrlen(rtnh);
++ if (attrlen > 0) {
++ attrs = rtnh_attrs(rtnh);
++ nla = nla_find(attrs, attrlen, RTA_ENCAP);
++ nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
++
++ if (nla_entype) {
++ encap_type = nla_get_u16(nla_entype);
++
++ if (lwtunnel_valid_encap_type(encap_type) != 0)
++ return -EOPNOTSUPP;
++ }
++ }
++ rtnh = rtnh_next(rtnh, &remaining);
++ }
++
++ return 0;
+ }
+-EXPORT_SYMBOL(lwtunnel_build_state);
++EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
+
+ int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+ {
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 30e2e21..3ff9d97 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1201,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
+ {
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
++ netif_device_detach(slave_dev);
++
+ if (p->phy) {
+ phy_stop(p->phy);
+ p->old_pause = -1;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 3e4f183..5b03d7f 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -46,6 +46,7 @@
+ #include <net/rtnetlink.h>
+ #include <net/xfrm.h>
+ #include <net/l3mdev.h>
++#include <net/lwtunnel.h>
+ #include <trace/events/fib.h>
+
+ #ifndef CONFIG_IP_MULTIPLE_TABLES
+@@ -676,6 +677,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ cfg->fc_mx_len = nla_len(attr);
+ break;
+ case RTA_MULTIPATH:
++ err = lwtunnel_valid_encap_type_attr(nla_data(attr),
++ nla_len(attr));
++ if (err < 0)
++ goto errout;
+ cfg->fc_mp = nla_data(attr);
+ cfg->fc_mp_len = nla_len(attr);
+ break;
+@@ -690,6 +695,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
+ break;
+ case RTA_ENCAP_TYPE:
+ cfg->fc_encap_type = nla_get_u16(attr);
++ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
++ if (err < 0)
++ goto errout;
+ break;
+ }
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index a8508b7..6a40680 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1278,8 +1278,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
+ nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
+ goto nla_put_failure;
+ #endif
+- if (fi->fib_nh->nh_lwtstate)
+- lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
++ if (fi->fib_nh->nh_lwtstate &&
++ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
++ goto nla_put_failure;
+ }
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+ if (fi->fib_nhs > 1) {
+@@ -1315,8 +1316,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
+ nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+ goto nla_put_failure;
+ #endif
+- if (nh->nh_lwtstate)
+- lwtunnel_fill_encap(skb, nh->nh_lwtstate);
++ if (nh->nh_lwtstate &&
++ lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
++ goto nla_put_failure;
++
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
+ } endfor_nexthops(fi);
+diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
+index fed3d29..0fd1976 100644
+--- a/net/ipv4/ip_tunnel_core.c
++++ b/net/ipv4/ip_tunnel_core.c
+@@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+ .fill_encap = ip_tun_fill_encap_info,
+ .get_encap_size = ip_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
++ .owner = THIS_MODULE,
+ };
+
+ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+@@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+ .fill_encap = ip6_tun_fill_encap_info,
+ .get_encap_size = ip6_tun_encap_nlsize,
+ .cmp_encap = ip_tun_cmp_encap,
++ .owner = THIS_MODULE,
+ };
+
+ void __init ip_tunnel_core_init(void)
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 8197b06..d851cae 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -2440,7 +2440,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
+ r->rtm_dst_len = 32;
+ r->rtm_src_len = 0;
+ r->rtm_tos = fl4->flowi4_tos;
+- r->rtm_table = table_id;
++ r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
+ if (nla_put_u32(skb, RTA_TABLE, table_id))
+ goto nla_put_failure;
+ r->rtm_type = rt->rt_type;
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 4e777a3..dd2560c 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
+ struct tcp_fastopen_cookie tmp;
+
+ if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
+- struct in6_addr *buf = (struct in6_addr *) tmp.val;
++ struct in6_addr *buf = &tmp.addr;
+ int i;
+
+ for (i = 0; i < 4; i++)
+@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
+ * scaled. So correct it appropriately.
+ */
+ tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
++ tp->max_window = tp->snd_wnd;
+
+ /* Activate the retrans timer so that SYNACK can be retransmitted.
+ * The request socket is not added to the ehash
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 4bc5ba3..95dfcba 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5515,8 +5515,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+- rcu_read_lock();
+- for_each_netdev_rcu(net, dev) {
++ for_each_netdev(net, dev) {
+ idev = __in6_dev_get(dev);
+ if (idev) {
+ int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
+@@ -5525,7 +5524,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
+ dev_disable_change(idev);
+ }
+ }
+- rcu_read_unlock();
+ }
+
+ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
+diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
+index e50c27a..f3db364 100644
+--- a/net/ipv6/ila/ila_lwt.c
++++ b/net/ipv6/ila/ila_lwt.c
+@@ -164,6 +164,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
+ .fill_encap = ila_fill_encap_info,
+ .get_encap_size = ila_encap_nlsize,
+ .cmp_encap = ila_encap_cmp,
++ .owner = THIS_MODULE,
+ };
+
+ int ila_lwt_init(void)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index d76674e..f95437f 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1108,7 +1108,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ t->parms.name);
+ goto tx_err_dst_release;
+ }
+- mtu = dst_mtu(dst) - psh_hlen;
++ mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
+ if (encap_limit >= 0) {
+ max_headroom += 8;
+ mtu -= 8;
+@@ -1117,7 +1117,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
+ mtu = IPV6_MIN_MTU;
+ if (skb_dst(skb) && !t->parms.collect_md)
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+- if (skb->len > mtu && !skb_is_gso(skb)) {
++ if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
+ *pmtu = mtu;
+ err = -EMSGSIZE;
+ goto tx_err_dst_release;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 1b57e11..bff4460 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2885,6 +2885,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (tb[RTA_MULTIPATH]) {
+ cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
+ cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
++
++ err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
++ cfg->fc_mp_len);
++ if (err < 0)
++ goto errout;
+ }
+
+ if (tb[RTA_PREF]) {
+@@ -2898,9 +2903,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (tb[RTA_ENCAP])
+ cfg->fc_encap = tb[RTA_ENCAP];
+
+- if (tb[RTA_ENCAP_TYPE])
++ if (tb[RTA_ENCAP_TYPE]) {
+ cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
++ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
++ if (err < 0)
++ goto errout;
++ }
++
+ if (tb[RTA_EXPIRES]) {
+ unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
+
+@@ -3306,7 +3316,8 @@ static int rt6_fill_node(struct net *net,
+ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
+ goto nla_put_failure;
+
+- lwtunnel_fill_encap(skb, rt->dst.lwtstate);
++ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
++ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 15fe976..5b77377 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+ }
+ EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
+
+-static u32 mpls_multipath_hash(struct mpls_route *rt,
+- struct sk_buff *skb, bool bos)
++static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
+ {
+ struct mpls_entry_decoded dec;
++ unsigned int mpls_hdr_len = 0;
+ struct mpls_shim_hdr *hdr;
+ bool eli_seen = false;
+ int label_index;
+ u32 hash = 0;
+
+- for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
++ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
+ label_index++) {
+- if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
++ mpls_hdr_len += sizeof(*hdr);
++ if (!pskb_may_pull(skb, mpls_hdr_len))
+ break;
+
+ /* Read and decode the current label */
+@@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
+ eli_seen = true;
+ }
+
+- bos = dec.bos;
+- if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
+- sizeof(struct iphdr))) {
++ if (!dec.bos)
++ continue;
++
++ /* found bottom label; does skb have room for a header? */
++ if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
+ const struct iphdr *v4hdr;
+
+- v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
+- label_index);
++ v4hdr = (const struct iphdr *)(hdr + 1);
+ if (v4hdr->version == 4) {
+ hash = jhash_3words(ntohl(v4hdr->saddr),
+ ntohl(v4hdr->daddr),
+ v4hdr->protocol, hash);
+ } else if (v4hdr->version == 6 &&
+- pskb_may_pull(skb, sizeof(*hdr) * label_index +
+- sizeof(struct ipv6hdr))) {
++ pskb_may_pull(skb, mpls_hdr_len +
++ sizeof(struct ipv6hdr))) {
+ const struct ipv6hdr *v6hdr;
+
+- v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
+- label_index);
+-
++ v6hdr = (const struct ipv6hdr *)(hdr + 1);
+ hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
+ hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
+ hash = jhash_1word(v6hdr->nexthdr, hash);
+ }
+ }
++
++ break;
+ }
+
+ return hash;
+ }
+
+ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+- struct sk_buff *skb, bool bos)
++ struct sk_buff *skb)
+ {
+ int alive = ACCESS_ONCE(rt->rt_nhn_alive);
+ u32 hash = 0;
+@@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ if (alive <= 0)
+ return NULL;
+
+- hash = mpls_multipath_hash(rt, skb, bos);
++ hash = mpls_multipath_hash(rt, skb);
+ nh_index = hash % alive;
+ if (alive == rt->rt_nhn)
+ goto out;
+@@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+ hdr = mpls_hdr(skb);
+ dec = mpls_entry_decode(hdr);
+
+- /* Pop the label */
+- skb_pull(skb, sizeof(*hdr));
+- skb_reset_network_header(skb);
+-
+- skb_orphan(skb);
+-
+ rt = mpls_route_input_rcu(net, dec.label);
+ if (!rt)
+ goto drop;
+
+- nh = mpls_select_multipath(rt, skb, dec.bos);
++ nh = mpls_select_multipath(rt, skb);
+ if (!nh)
+ goto drop;
+
+@@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
+ if (!mpls_output_possible(out_dev))
+ goto drop;
+
++ /* Pop the label */
++ skb_pull(skb, sizeof(*hdr));
++ skb_reset_network_header(skb);
++
++ skb_orphan(skb);
++
+ if (skb_warn_if_lro(skb))
+ goto drop;
+
+diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
+index cf52cf3..bc9aaf5 100644
+--- a/net/mpls/mpls_iptunnel.c
++++ b/net/mpls/mpls_iptunnel.c
+@@ -218,6 +218,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+ .fill_encap = mpls_fill_encap_info,
+ .get_encap_size = mpls_encap_nlsize,
+ .cmp_encap = mpls_encap_cmp,
++ .owner = THIS_MODULE,
+ };
+
+ static int __init mpls_iptunnel_init(void)
+diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
+index fecefa2..eab210b 100644
+--- a/net/openvswitch/conntrack.c
++++ b/net/openvswitch/conntrack.c
+@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
+ int hooknum, nh_off, err = NF_ACCEPT;
+
+ nh_off = skb_network_offset(skb);
+- skb_pull(skb, nh_off);
++ skb_pull_rcsum(skb, nh_off);
+
+ /* See HOOK2MANIP(). */
+ if (maniptype == NF_NAT_MANIP_SRC)
+@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
+ err = nf_nat_packet(ct, ctinfo, hooknum, skb);
+ push:
+ skb_push(skb, nh_off);
++ skb_postpush_rcsum(skb, skb->data, nh_off);
+
+ return err;
+ }
+@@ -890,7 +891,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+
+ /* The conntrack module expects to be working at L3. */
+ nh_ofs = skb_network_offset(skb);
+- skb_pull(skb, nh_ofs);
++ skb_pull_rcsum(skb, nh_ofs);
+
+ if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
+ err = handle_fragments(net, key, info->zone.id, skb);
+@@ -904,6 +905,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+ err = ovs_ct_lookup(net, key, info, skb);
+
+ skb_push(skb, nh_ofs);
++ skb_postpush_rcsum(skb, skb->data, nh_ofs);
+ if (err)
+ kfree_skb(skb);
+ return err;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index dd23323..94e4a59 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1972,7 +1972,7 @@ static int __packet_rcv_vnet(const struct sk_buff *skb,
+ {
+ *vnet_hdr = (const struct virtio_net_hdr) { 0 };
+
+- if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
++ if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le(), true))
+ BUG();
+
+ return 0;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index f893d18..c6c2a93 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -903,8 +903,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ goto err;
+ }
+ act->order = i;
+- if (event == RTM_GETACTION)
+- act->tcfa_refcnt++;
+ list_add_tail(&act->list, &actions);
+ }
+
+@@ -917,7 +915,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+ return ret;
+ }
+ err:
+- tcf_action_destroy(&actions, 0);
++ if (event != RTM_GETACTION)
++ tcf_action_destroy(&actions, 0);
+ return ret;
+ }
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2358f26..2d03d5b 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ unsigned int hash;
+ struct unix_address *addr;
+ struct hlist_head *list;
++ struct path path = { NULL, NULL };
+
+ err = -EINVAL;
+ if (sunaddr->sun_family != AF_UNIX)
+@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ goto out;
+ addr_len = err;
+
++ if (sun_path[0]) {
++ umode_t mode = S_IFSOCK |
++ (SOCK_INODE(sock)->i_mode & ~current_umask());
++ err = unix_mknod(sun_path, mode, &path);
++ if (err) {
++ if (err == -EEXIST)
++ err = -EADDRINUSE;
++ goto out;
++ }
++ }
++
+ err = mutex_lock_interruptible(&u->bindlock);
+ if (err)
+- goto out;
++ goto out_put;
+
+ err = -EINVAL;
+ if (u->addr)
+@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ atomic_set(&addr->refcnt, 1);
+
+ if (sun_path[0]) {
+- struct path path;
+- umode_t mode = S_IFSOCK |
+- (SOCK_INODE(sock)->i_mode & ~current_umask());
+- err = unix_mknod(sun_path, mode, &path);
+- if (err) {
+- if (err == -EEXIST)
+- err = -EADDRINUSE;
+- unix_release_addr(addr);
+- goto out_up;
+- }
+ addr->hash = UNIX_HASH_SIZE;
+ hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ spin_lock(&unix_table_lock);
+@@ -1065,6 +1067,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ spin_unlock(&unix_table_lock);
+ out_up:
+ mutex_unlock(&u->bindlock);
++out_put:
++ if (err)
++ path_put(&path);
+ out:
+ return err;
+ }
diff --git a/4.9.9/1008_linux-4.9.9.patch b/4.9.9/1008_linux-4.9.9.patch
new file mode 100644
index 0000000..411ce9b
--- /dev/null
+++ b/4.9.9/1008_linux-4.9.9.patch
@@ -0,0 +1,2333 @@
+diff --git a/Makefile b/Makefile
+index 1130803..c0c41c9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
+index c53dbea..838dad5 100644
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
+ cbz w6, .Lcbcencloop
+
+ ld1 {v0.16b}, [x5] /* get iv */
+- enc_prepare w3, x2, x5
++ enc_prepare w3, x2, x6
+
+ .Lcbcencloop:
+ ld1 {v1.16b}, [x1], #16 /* get next pt block */
+ eor v0.16b, v0.16b, v1.16b /* ..and xor with iv */
+- encrypt_block v0, w3, x2, x5, w6
++ encrypt_block v0, w3, x2, x6, w7
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
+ bne .Lcbcencloop
++ st1 {v0.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_encrypt)
+
+@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ cbz w6, .LcbcdecloopNx
+
+ ld1 {v7.16b}, [x5] /* get iv */
+- dec_prepare w3, x2, x5
++ dec_prepare w3, x2, x6
+
+ .LcbcdecloopNx:
+ #if INTERLEAVE >= 2
+@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ .Lcbcdecloop:
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
+ mov v0.16b, v1.16b /* ...and copy to v0 */
+- decrypt_block v0, w3, x2, x5, w6
++ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
+ mov v7.16b, v1.16b /* ct is next iv */
+ st1 {v0.16b}, [x0], #16
+@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ bne .Lcbcdecloop
+ .Lcbcdecout:
+ FRAME_POP
++ st1 {v7.16b}, [x5] /* return iv */
+ ret
+ AES_ENDPROC(aes_cbc_decrypt)
+
+@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
+
+ AES_ENTRY(aes_ctr_encrypt)
+ FRAME_PUSH
+- cbnz w6, .Lctrfirst /* 1st time around? */
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
+-#if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
+- bcs .Lctrinc
+- add x5, x5, #1 /* increment BE ctr */
+- b .LctrincNx
+-#else
+- b .Lctrinc
+-#endif
+-.Lctrfirst:
++ cbz w6, .Lctrnotfirst /* 1st time around? */
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
+- umov x5, v4.d[1] /* keep swabbed ctr in reg */
+- rev x5, x5
++
++.Lctrnotfirst:
++ umov x8, v4.d[1] /* keep swabbed ctr in reg */
++ rev x8, x8
+ #if INTERLEAVE >= 2
+- cmn w5, w4 /* 32 bit overflow? */
++ cmn w8, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
+ .LctrloopNx:
+ subs w4, w4, #INTERLEAVE
+@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
+ #if INTERLEAVE == 2
+ mov v0.8b, v4.8b
+ mov v1.8b, v4.8b
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v0.d[1], x7
+- rev x7, x5
+- add x5, x5, #1
++ rev x7, x8
++ add x8, x8, #1
+ ins v1.d[1], x7
+ ld1 {v2.16b-v3.16b}, [x1], #32 /* get 2 input blocks */
+ do_encrypt_block2x
+@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
+ st1 {v0.16b-v1.16b}, [x0], #32
+ #else
+ ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
+- dup v7.4s, w5
++ dup v7.4s, w8
+ mov v0.16b, v4.16b
+ add v7.4s, v7.4s, v8.4s
+ mov v1.16b, v4.16b
+@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
+ eor v2.16b, v7.16b, v2.16b
+ eor v3.16b, v5.16b, v3.16b
+ st1 {v0.16b-v3.16b}, [x0], #64
+- add x5, x5, #INTERLEAVE
++ add x8, x8, #INTERLEAVE
+ #endif
+- cbz w4, .LctroutNx
+-.LctrincNx:
+- rev x7, x5
++ rev x7, x8
+ ins v4.d[1], x7
++ cbz w4, .Lctrout
+ b .LctrloopNx
+-.LctroutNx:
+- sub x5, x5, #1
+- rev x7, x5
+- ins v4.d[1], x7
+- b .Lctrout
+ .Lctr1x:
+ adds w4, w4, #INTERLEAVE
+ beq .Lctrout
+@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
+ .Lctrloop:
+ mov v0.16b, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
++
++ adds x8, x8, #1 /* increment BE ctr */
++ rev x7, x8
++ ins v4.d[1], x7
++ bcs .Lctrcarry /* overflow? */
++
++.Lctrcarrydone:
+ subs w4, w4, #1
+ bmi .Lctrhalfblock /* blocks < 0 means 1/2 block */
+ ld1 {v3.16b}, [x1], #16
+ eor v3.16b, v0.16b, v3.16b
+ st1 {v3.16b}, [x0], #16
+- beq .Lctrout
+-.Lctrinc:
+- adds x5, x5, #1 /* increment BE ctr */
+- rev x7, x5
+- ins v4.d[1], x7
+- bcc .Lctrloop /* no overflow? */
+- umov x7, v4.d[0] /* load upper word of ctr */
+- rev x7, x7 /* ... to handle the carry */
+- add x7, x7, #1
+- rev x7, x7
+- ins v4.d[0], x7
+- b .Lctrloop
++ bne .Lctrloop
++
++.Lctrout:
++ st1 {v4.16b}, [x5] /* return next CTR value */
++ FRAME_POP
++ ret
++
+ .Lctrhalfblock:
+ ld1 {v3.8b}, [x1]
+ eor v3.8b, v0.8b, v3.8b
+ st1 {v3.8b}, [x0]
+-.Lctrout:
+ FRAME_POP
+ ret
++
++.Lctrcarry:
++ umov x7, v4.d[0] /* load upper word of ctr */
++ rev x7, x7 /* ... to handle the carry */
++ add x7, x7, #1
++ rev x7, x7
++ ins v4.d[0], x7
++ b .Lctrcarrydone
+ AES_ENDPROC(aes_ctr_encrypt)
+ .ltorg
+
+diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
+index b312b15..6e834ca 100644
+--- a/arch/powerpc/include/asm/cpu_has_feature.h
++++ b/arch/powerpc/include/asm/cpu_has_feature.h
+@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
+ {
+ int i;
+
++#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
++#endif
+
+ #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
+index e311c25..a244e09 100644
+--- a/arch/powerpc/include/asm/mmu.h
++++ b/arch/powerpc/include/asm/mmu.h
+@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
+ {
+ int i;
+
++#ifndef __clang__ /* clang can't cope with this */
+ BUILD_BUG_ON(!__builtin_constant_p(feature));
++#endif
+
+ #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
+ if (!static_key_initialized) {
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 5c31369..a5dd493 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
+ static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
+ {
+ struct eeh_pe *pe = (struct eeh_pe *)data;
+- bool *clear_sw_state = flag;
++ bool clear_sw_state = *(bool *)flag;
+ int i, rc = 1;
+
+ for (i = 0; rc && i < 3; i++)
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 88ac964..1e8c572 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2747,6 +2747,9 @@ static void __init prom_find_boot_cpu(void)
+
+ cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+
++ if (!PHANDLE_VALID(cpu_pkg))
++ return;
++
+ prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
+ prom.cpu = be32_to_cpu(rval);
+
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index ebb7f46..9a25dce 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+ if (!pmdp)
+ return -ENOMEM;
+ if (map_page_size == PMD_SIZE) {
+- ptep = (pte_t *)pudp;
++ ptep = pmdp_ptep(pmdp);
+ goto set_the_pte;
+ }
+ ptep = pte_alloc_kernel(pmdp, ea);
+@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
+ }
+ pmdp = pmd_offset(pudp, ea);
+ if (map_page_size == PMD_SIZE) {
+- ptep = (pte_t *)pudp;
++ ptep = pmdp_ptep(pmdp);
+ goto set_the_pte;
+ }
+ if (!pmd_present(*pmdp)) {
+diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
+index dbaaf7dc..19d646a 100644
+--- a/arch/x86/events/intel/uncore.c
++++ b/arch/x86/events/intel/uncore.c
+@@ -763,30 +763,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
+ pmu->registered = false;
+ }
+
+-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
+-{
+- struct intel_uncore_pmu *pmu = type->pmus;
+- struct intel_uncore_box *box;
+- int i, pkg;
+-
+- if (pmu) {
+- pkg = topology_physical_package_id(cpu);
+- for (i = 0; i < type->num_boxes; i++, pmu++) {
+- box = pmu->boxes[pkg];
+- if (box)
+- uncore_box_exit(box);
+- }
+- }
+-}
+-
+-static void uncore_exit_boxes(void *dummy)
+-{
+- struct intel_uncore_type **types;
+-
+- for (types = uncore_msr_uncores; *types; types++)
+- __uncore_exit_boxes(*types++, smp_processor_id());
+-}
+-
+ static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
+ {
+ int pkg;
+@@ -1077,22 +1053,12 @@ static int uncore_cpu_dying(unsigned int cpu)
+ return 0;
+ }
+
+-static int first_init;
+-
+ static int uncore_cpu_starting(unsigned int cpu)
+ {
+ struct intel_uncore_type *type, **types = uncore_msr_uncores;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+- int i, pkg, ncpus = 1;
+-
+- if (first_init) {
+- /*
+- * On init we get the number of online cpus in the package
+- * and set refcount for all of them.
+- */
+- ncpus = cpumask_weight(topology_core_cpumask(cpu));
+- }
++ int i, pkg;
+
+ pkg = topology_logical_package_id(cpu);
+ for (; *types; types++) {
+@@ -1103,7 +1069,7 @@ static int uncore_cpu_starting(unsigned int cpu)
+ if (!box)
+ continue;
+ /* The first cpu on a package activates the box */
+- if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
++ if (atomic_inc_return(&box->refcnt) == 1)
+ uncore_box_init(box);
+ }
+ }
+@@ -1407,19 +1373,17 @@ static int __init intel_uncore_init(void)
+ "PERF_X86_UNCORE_PREP",
+ uncore_cpu_prepare, NULL);
+ }
+- first_init = 1;
++
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+ "AP_PERF_X86_UNCORE_STARTING",
+ uncore_cpu_starting, uncore_cpu_dying);
+- first_init = 0;
++
+ cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+ "AP_PERF_X86_UNCORE_ONLINE",
+ uncore_event_cpu_online, uncore_event_cpu_offline);
+ return 0;
+
+ err:
+- /* Undo box->init_box() */
+- on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
+ uncore_types_exit(uncore_msr_uncores);
+ uncore_pci_exit();
+ return ret;
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index 3d8ff40..7249f15 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2118,6 +2118,7 @@ static inline void __init check_timer(void)
+ if (idx != -1 && irq_trigger(idx))
+ unmask_ioapic_irq(irq_get_chip_data(0));
+ }
++ irq_domain_deactivate_irq(irq_data);
+ irq_domain_activate_irq(irq_data);
+ if (timer_irq_works()) {
+ if (disable_timer_pin_1 > 0)
+@@ -2139,6 +2140,7 @@ static inline void __init check_timer(void)
+ * legacy devices should be connected to IO APIC #0
+ */
+ replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
++ irq_domain_deactivate_irq(irq_data);
+ irq_domain_activate_irq(irq_data);
+ legacy_pic->unmask(0);
+ if (timer_irq_works()) {
+diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
+index 274fab9..932348fb 100644
+--- a/arch/x86/kernel/hpet.c
++++ b/arch/x86/kernel/hpet.c
+@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
+ } else {
+ struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
++ irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
+ irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
+ disable_irq(hdev->irq);
+ irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 487b957..731044e 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3148,6 +3148,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ memcpy(dest, xsave, XSAVE_HDR_OFFSET);
+
+ /* Set XSTATE_BV */
++ xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
+ *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
+
+ /*
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index 319148b..2f25a36 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -269,6 +269,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ efi_scratch.use_pgd = true;
+
+ /*
++ * Certain firmware versions are way too sentimential and still believe
++ * they are exclusive and unquestionable owners of the first physical page,
++ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
++ * (but then write-access it later during SetVirtualAddressMap()).
++ *
++ * Create a 1:1 mapping for this page, to avoid triple faults during early
++ * boot with such firmware. We are free to hand this page to the BIOS,
++ * as trim_bios_range() will reserve the first page and isolate it away
++ * from memory allocators anyway.
++ */
++ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
++ pr_err("Failed to create 1:1 mapping for the first page!\n");
++ return 1;
++ }
++
++ /*
+ * When making calls to the firmware everything needs to be 1:1
+ * mapped and addressable with 32-bit pointers. Map the kernel
+ * text and allocate a new stack because we can't rely on the
+diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
+index 88a044a..32cdc2c 100644
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -540,7 +540,7 @@ subsys_initcall(topology_init);
+
+ void cpu_reset(void)
+ {
+-#if XCHAL_HAVE_PTP_MMU
++#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
+ local_irq_disable();
+ /*
+ * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index df939b5..1fad2a6 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
+ struct crypto_larval *larval;
+ int err;
+
++ alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ err = crypto_check_alg(alg);
+ if (err)
+ return err;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 223a770..33e363d 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1695,6 +1695,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
+
+ if (qc->err_mask & ~AC_ERR_OTHER)
+ qc->err_mask &= ~AC_ERR_OTHER;
++ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
++ qc->result_tf.command |= ATA_SENSE;
+ }
+
+ /* finish up */
+@@ -4316,10 +4318,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
+ /*
+- * Device times out with higher max sects.
++ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+- { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
++ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
+ /* Devices we expect to fail diagnostics */
+
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index 823e938..2f32782 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
+ host->iomap = NULL;
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
++ if (!hpriv->base)
++ return -ENOMEM;
++
+ hpriv->base -= SATAHC0_REG_BASE;
+
+ hpriv->clk = clk_get(&pdev->dev, NULL);
+diff --git a/drivers/base/memory.c b/drivers/base/memory.c
+index e7f86a8..c5cdd19 100644
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -391,33 +391,33 @@ static ssize_t show_valid_zones(struct device *dev,
+ {
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn, end_pfn;
++ unsigned long valid_start, valid_end, valid_pages;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+- struct page *first_page;
+ struct zone *zone;
+ int zone_shift = 0;
+
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ end_pfn = start_pfn + nr_pages;
+- first_page = pfn_to_page(start_pfn);
+
+ /* The block contains more than one zone can not be offlined. */
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return sprintf(buf, "none\n");
+
+- zone = page_zone(first_page);
++ zone = page_zone(pfn_to_page(valid_start));
++ valid_pages = valid_end - valid_start;
+
+ /* MMOP_ONLINE_KEEP */
+ sprintf(buf, "%s", zone->name);
+
+ /* MMOP_ONLINE_KERNEL */
+- zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+ }
+
+ /* MMOP_ONLINE_MOVABLE */
+- zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
++ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
+ if (zone_shift) {
+ strcat(buf, " ");
+ strcat(buf, (zone + zone_shift)->name);
+diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
+index f642c42..168fa17 100644
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
+ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+ /* driver_chipcommon_b.c */
+ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
+diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
+index b4f6520..62f5bfa 100644
+--- a/drivers/bcma/driver_chipcommon.c
++++ b/drivers/bcma/driver_chipcommon.c
+@@ -15,8 +15,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/bcma/bcma.h>
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+-
+ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
+ u32 mask, u32 value)
+ {
+@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
+ if (cc->capabilities & BCMA_CC_CAP_PMU)
+ bcma_pmu_early_init(cc);
+
+- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+- bcma_chipco_serial_init(cc);
+-
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ bcma_core_chipcommon_flash_detect(cc);
+
+@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+ return res;
+ }
+
+-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
++#ifdef CONFIG_BCMA_DRIVER_MIPS
++void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+ {
+-#if IS_BUILTIN(CONFIG_BCM47XX)
+ unsigned int irq;
+ u32 baud_base;
+ u32 i;
+@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+ ports[i].baud_base = baud_base;
+ ports[i].reg_shift = 0;
+ }
+-#endif /* CONFIG_BCM47XX */
+ }
++#endif /* CONFIG_BCMA_DRIVER_MIPS */
+diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
+index 96f1713..89af807 100644
+--- a/drivers/bcma/driver_mips.c
++++ b/drivers/bcma/driver_mips.c
+@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
+
+ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+ {
++ struct bcma_bus *bus = mcore->core->bus;
++
+ if (mcore->early_setup_done)
+ return;
+
++ bcma_chipco_serial_init(&bus->drv_cc);
+ bcma_core_mips_nvram_init(mcore);
+
+ mcore->early_setup_done = true;
+diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
+index d5ba43a..55c1782 100644
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -153,6 +153,8 @@ struct cppi41_dd {
+
+ /* context for suspend/resume */
+ unsigned int dma_tdfdq;
++
++ bool is_suspended;
+ };
+
+ #define FIST_COMPLETION_QUEUE 93
+@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
+ BUG_ON(desc_num >= ALLOC_DECS_NUM);
+ c = cdd->chan_busy[desc_num];
+ cdd->chan_busy[desc_num] = NULL;
++
++ /* Usecount for chan_busy[], paired with push_desc_queue() */
++ pm_runtime_put(cdd->ddev.dev);
++
+ return c;
+ }
+
+@@ -447,6 +453,15 @@ static void push_desc_queue(struct cppi41_channel *c)
+ */
+ __iowmb();
+
++ /*
++ * DMA transfers can take at least 200ms to complete with USB mass
++ * storage connected. To prevent autosuspend timeouts, we must use
++ * pm_runtime_get/put() when chan_busy[] is modified. This will get
++ * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
++ * outcome of the transfer.
++ */
++ pm_runtime_get(cdd->ddev.dev);
++
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+@@ -457,20 +472,26 @@ static void push_desc_queue(struct cppi41_channel *c)
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ }
+
+-static void pending_desc(struct cppi41_channel *c)
++/*
++ * Caller must hold cdd->lock to prevent push_desc_queue()
++ * getting called out of order. We have both cppi41_dma_issue_pending()
++ * and cppi41_runtime_resume() call this function.
++ */
++static void cppi41_run_queue(struct cppi41_dd *cdd)
+ {
+- struct cppi41_dd *cdd = c->cdd;
+- unsigned long flags;
++ struct cppi41_channel *c, *_c;
+
+- spin_lock_irqsave(&cdd->lock, flags);
+- list_add_tail(&c->node, &cdd->pending);
+- spin_unlock_irqrestore(&cdd->lock, flags);
++ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
++ push_desc_queue(c);
++ list_del(&c->node);
++ }
+ }
+
+ static void cppi41_dma_issue_pending(struct dma_chan *chan)
+ {
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
++ unsigned long flags;
+ int error;
+
+ error = pm_runtime_get(cdd->ddev.dev);
+@@ -482,10 +503,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
+ return;
+ }
+
+- if (likely(pm_runtime_active(cdd->ddev.dev)))
+- push_desc_queue(c);
+- else
+- pending_desc(c);
++ spin_lock_irqsave(&cdd->lock, flags);
++ list_add_tail(&c->node, &cdd->pending);
++ if (!cdd->is_suspended)
++ cppi41_run_queue(cdd);
++ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ pm_runtime_mark_last_busy(cdd->ddev.dev);
+ pm_runtime_put_autosuspend(cdd->ddev.dev);
+@@ -705,6 +727,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
+ WARN_ON(!cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = NULL;
+
++ /* Usecount for chan_busy[], paired with push_desc_queue() */
++ pm_runtime_put(cdd->ddev.dev);
++
+ return 0;
+ }
+
+@@ -1150,8 +1175,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
+ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+ {
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
++ unsigned long flags;
+
++ spin_lock_irqsave(&cdd->lock, flags);
++ cdd->is_suspended = true;
+ WARN_ON(!list_empty(&cdd->pending));
++ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+ }
+@@ -1159,14 +1188,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
+ static int __maybe_unused cppi41_runtime_resume(struct device *dev)
+ {
+ struct cppi41_dd *cdd = dev_get_drvdata(dev);
+- struct cppi41_channel *c, *_c;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cdd->lock, flags);
+- list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+- push_desc_queue(c);
+- list_del(&c->node);
+- }
++ cdd->is_suspended = false;
++ cppi41_run_queue(cdd);
+ spin_unlock_irqrestore(&cdd->lock, flags);
+
+ return 0;
+diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
+index 921dfa0..260c4b4 100644
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+ struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
++ void *new_fdt_addr;
+ };
+
+ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+- return EFI_SUCCESS;
++ return update_fdt_memmap(p->new_fdt_addr, map);
+ }
+
+ /*
+@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
+
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
++ priv.new_fdt_addr = (void *)*new_fdt_addr;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
+
+- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+- if (status != EFI_SUCCESS) {
+- /*
+- * The kernel won't get far without the memory map, but
+- * may still be able to print something meaningful so
+- * return success here.
+- */
+- return EFI_SUCCESS;
+- }
+-
+ /* Install the new virtual address map */
+ svam = sys_table->runtime->set_virtual_address_map;
+ status = svam(runtime_entry_count * desc_size, desc_size,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+index b13c8aa..6df924f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+@@ -227,6 +227,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
++ if (adev->mode_info.num_crtc)
++ amdgpu_display_set_vga_render_state(adev, false);
++
+ gmc_v6_0_mc_stop(adev, &save);
+
+ if (gmc_v6_0_wait_for_idle((void *)adev)) {
+@@ -256,7 +259,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
+ dev_warn(adev->dev, "Wait for MC idle timedout !\n");
+ }
+ gmc_v6_0_mc_resume(adev, &save);
+- amdgpu_display_set_vga_render_state(adev, false);
+ }
+
+ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 67db157..4147e51 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -2152,30 +2152,42 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
+
+ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
+ {
+- struct i915_gem_context *ctx = dev_priv->kernel_context;
+ struct intel_engine_cs *engine;
++ struct i915_gem_context *ctx;
++
++ /* Because we emit WA_TAIL_DWORDS there may be a disparity
++ * between our bookkeeping in ce->ring->head and ce->ring->tail and
++ * that stored in context. As we only write new commands from
++ * ce->ring->tail onwards, everything before that is junk. If the GPU
++ * starts reading from its RING_HEAD from the context, it may try to
++ * execute that junk and die.
++ *
++ * So to avoid that we reset the context images upon resume. For
++ * simplicity, we just zero everything out.
++ */
++ list_for_each_entry(ctx, &dev_priv->context_list, link) {
++ for_each_engine(engine, dev_priv) {
++ struct intel_context *ce = &ctx->engine[engine->id];
++ u32 *reg;
+
+- for_each_engine(engine, dev_priv) {
+- struct intel_context *ce = &ctx->engine[engine->id];
+- void *vaddr;
+- uint32_t *reg_state;
+-
+- if (!ce->state)
+- continue;
+-
+- vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+- if (WARN_ON(IS_ERR(vaddr)))
+- continue;
++ if (!ce->state)
++ continue;
+
+- reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
++ reg = i915_gem_object_pin_map(ce->state->obj,
++ I915_MAP_WB);
++ if (WARN_ON(IS_ERR(reg)))
++ continue;
+
+- reg_state[CTX_RING_HEAD+1] = 0;
+- reg_state[CTX_RING_TAIL+1] = 0;
++ reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
++ reg[CTX_RING_HEAD+1] = 0;
++ reg[CTX_RING_TAIL+1] = 0;
+
+- ce->state->obj->dirty = true;
+- i915_gem_object_unpin_map(ce->state->obj);
++ ce->state->obj->dirty = true;
++ i915_gem_object_unpin_map(ce->state->obj);
+
+- ce->ring->head = 0;
+- ce->ring->tail = 0;
++ ce->ring->head = ce->ring->tail = 0;
++ ce->ring->last_retired_head = -1;
++ intel_ring_update_space(ce->ring);
++ }
+ }
+ }
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+index 74856a8..e64f524 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t mpllP;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++ mpllP = (mpllP >> 8) & 0xf;
+ if (!mpllP)
+ mpllP = 4;
+
+@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ uint32_t clock;
+
+ pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+- return clock;
++ return clock / 1000;
+ }
+
+ ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+index 6f0436d..f8f2f16 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
+ );
+ }
+ for (i = 0; i < size; i++)
+- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
++ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
+ for (; i < 0x60; i++)
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8));
+ nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 60d3020..e06c134 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -167,7 +167,7 @@ struct cp2112_device {
+ atomic_t xfer_avail;
+ struct gpio_chip gc;
+ u8 *in_out_buffer;
+- spinlock_t lock;
++ struct mutex lock;
+ };
+
+ static int gpio_push_pull = 0xFF;
+@@ -179,10 +179,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -206,8 +205,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ ret = 0;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
+- return ret <= 0 ? ret : -EIO;
++ mutex_unlock(&dev->lock);
++ return ret < 0 ? ret : -EIO;
+ }
+
+ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+@@ -215,10 +214,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ buf[0] = CP2112_GPIO_SET;
+ buf[1] = value ? 0xff : 0;
+@@ -230,7 +228,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ if (ret < 0)
+ hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ }
+
+ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+@@ -238,10 +236,9 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+@@ -255,7 +252,7 @@ static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
+ ret = (buf[1] >> offset) & 1;
+
+ exit:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ return ret;
+ }
+@@ -266,10 +263,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ struct cp2112_device *dev = gpiochip_get_data(chip);
+ struct hid_device *hdev = dev->hdev;
+ u8 *buf = dev->in_out_buffer;
+- unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&dev->lock, flags);
++ mutex_lock(&dev->lock);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+@@ -290,7 +286,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ goto fail;
+ }
+
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+
+ /*
+ * Set gpio value when output direction is already set,
+@@ -301,7 +297,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
+ return 0;
+
+ fail:
+- spin_unlock_irqrestore(&dev->lock, flags);
++ mutex_unlock(&dev->lock);
+ return ret < 0 ? ret : -EIO;
+ }
+
+@@ -1057,7 +1053,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+- spin_lock_init(&dev->lock);
++ mutex_init(&dev->lock);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 575aa65..9845189 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -76,6 +76,9 @@
+ #define USB_VENDOR_ID_ALPS_JP 0x044E
+ #define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
+
++#define USB_VENDOR_ID_AMI 0x046b
++#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
++
+ #define USB_VENDOR_ID_ANTON 0x1130
+ #define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
+
+diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
+index c5c5fbe..52026dc 100644
+--- a/drivers/hid/hid-lg.c
++++ b/drivers/hid/hid-lg.c
+@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
+ .driver_data = LG_NOGET | LG_FF4 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
+- .driver_data = LG_FF2 },
++ .driver_data = LG_NOGET | LG_FF2 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
+ .driver_data = LG_FF3 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e6cfd32..cde060f 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -57,6 +57,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
++ { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index 1cb7992..623be90 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -164,19 +164,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
+ wacom->id[0] = STYLUS_DEVICE_ID;
+ }
+
+- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+- if (features->pressure_max > 255)
+- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+- pressure += (features->pressure_max + 1) / 2;
+-
+- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+- input_report_abs(input, ABS_PRESSURE, pressure);
+-
+- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+- /* Only allow the stylus2 button to be reported for the pen tool. */
+- input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
++ if (prox) {
++ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
++ if (features->pressure_max > 255)
++ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
++ pressure += (features->pressure_max + 1) / 2;
++
++ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
++ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
++ input_report_abs(input, ABS_PRESSURE, pressure);
++
++ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
++ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
++ /* Only allow the stylus2 button to be reported for the pen tool. */
++ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
++ }
+
+ if (!prox)
+ wacom->id[0] = 0;
+diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
+index 2bbf0c5..7d61b56 100644
+--- a/drivers/iio/adc/palmas_gpadc.c
++++ b/drivers/iio/adc/palmas_gpadc.c
+@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
+
+ static int palmas_gpadc_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(indio_dev);
+ int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
+ int ret;
+@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
+
+ static int palmas_gpadc_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct palmas_gpadc *adc = iio_priv(indio_dev);
+ int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
+ int ret;
+diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
+index 9a08146..6bb23a4 100644
+--- a/drivers/iio/health/afe4403.c
++++ b/drivers/iio/health/afe4403.c
+@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
+
+ static int __maybe_unused afe4403_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
+
+ static int __maybe_unused afe4403_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
+ struct afe4403_data *afe = iio_priv(indio_dev);
+ int ret;
+
+diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
+index 4526640..964f523 100644
+--- a/drivers/iio/health/afe4404.c
++++ b/drivers/iio/health/afe4404.c
+@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
+
+ static int __maybe_unused afe4404_suspend(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
+
+ static int __maybe_unused afe4404_resume(struct device *dev)
+ {
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
++ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct afe4404_data *afe = iio_priv(indio_dev);
+ int ret;
+
+diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
+index 90ab8a2d..183c143 100644
+--- a/drivers/iio/health/max30100.c
++++ b/drivers/iio/health/max30100.c
+@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
+
+ mutex_lock(&data->lock);
+
+- while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
++ while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
+ ret = max30100_read_measurement(data);
+ if (ret)
+ break;
+diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
+index 9c47bc9..2a22ad9 100644
+--- a/drivers/iio/humidity/dht11.c
++++ b/drivers/iio/humidity/dht11.c
+@@ -71,7 +71,8 @@
+ * a) select an implementation using busy loop polling on those systems
+ * b) use the checksum to do some probabilistic decoding
+ */
+-#define DHT11_START_TRANSMISSION 18 /* ms */
++#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
++#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
+ #define DHT11_MIN_TIMERES 34000 /* ns */
+ #define DHT11_THRESHOLD 49000 /* ns */
+ #define DHT11_AMBIG_LOW 23000 /* ns */
+@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
+ ret = gpio_direction_output(dht11->gpio, 0);
+ if (ret)
+ goto err;
+- msleep(DHT11_START_TRANSMISSION);
++ usleep_range(DHT11_START_TRANSMISSION_MIN,
++ DHT11_START_TRANSMISSION_MAX);
+ ret = gpio_direction_input(dht11->gpio);
+ if (ret)
+ goto err;
+diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
+index bb0fde6..cc2243f 100644
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -321,7 +321,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+- FW_RI_RES_WR_FBMAX_V(2) |
++ (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
++ FW_RI_RES_WR_FBMAX_V(3)) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+@@ -345,7 +346,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+ FW_RI_RES_WR_DCAEN_V(0) |
+ FW_RI_RES_WR_DCACPU_V(0) |
+ FW_RI_RES_WR_FBMIN_V(2) |
+- FW_RI_RES_WR_FBMAX_V(2) |
++ FW_RI_RES_WR_FBMAX_V(3) |
+ FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
+ FW_RI_RES_WR_CIDXFTHRESH_V(0) |
+ FW_RI_RES_WR_EQSIZE_V(eqsize));
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index e1e274a..ba637ff 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2719,7 +2719,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ if (intmask & SDHCI_INT_RETUNE)
+ mmc_retune_needed(host->mmc);
+
+- if (intmask & SDHCI_INT_CARD_INT) {
++ if ((intmask & SDHCI_INT_CARD_INT) &&
++ (host->ier & SDHCI_INT_CARD_INT)) {
+ sdhci_enable_sdio_irq_nolock(host, false);
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+index d02ca14..8d3e53f 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+@@ -91,7 +91,7 @@
+
+ #define IWL8000_FW_PRE "iwlwifi-8000C-"
+ #define IWL8000_MODULE_FIRMWARE(api) \
+- IWL8000_FW_PRE "-" __stringify(api) ".ucode"
++ IWL8000_FW_PRE __stringify(api) ".ucode"
+
+ #define IWL8265_FW_PRE "iwlwifi-8265-"
+ #define IWL8265_MODULE_FIRMWARE(api) \
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index fc77188..52de3c6 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -1144,9 +1144,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+
+- /* Make sure reserved queue is still marked as such (or allocated) */
+- mvm->queue_info[mvm_sta->reserved_queue].status =
+- IWL_MVM_QUEUE_RESERVED;
++ /* Make sure reserved queue is still marked as such (if allocated) */
++ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
++ mvm->queue_info[mvm_sta->reserved_queue].status =
++ IWL_MVM_QUEUE_RESERVED;
+
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
+index 0ec649d..b0916b1 100644
+--- a/drivers/pci/pcie/aspm.c
++++ b/drivers/pci/pcie/aspm.c
+@@ -518,25 +518,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
++
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
++
++ /*
++ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
++ * hierarchies.
++ */
++ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
++ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
++ link->root = link;
++ } else {
+ struct pcie_link_state *parent;
++
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
++
+ link->parent = parent;
++ link->root = link->parent->root;
+ list_add(&link->link, &parent->children);
+ }
+- /* Setup a pointer to the root port link */
+- if (!link->parent)
+- link->root = link;
+- else
+- link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+ pdev->link_state = link;
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 0790153..583ae3f 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+ int reg)
+ {
+ struct byt_community *comm = byt_get_community(vg, offset);
+- u32 reg_offset = 0;
++ u32 reg_offset;
+
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
+- if (reg == BYT_INT_STAT_REG)
++ switch (reg) {
++ case BYT_INT_STAT_REG:
+ reg_offset = (offset / 32) * 4;
+- else
++ break;
++ case BYT_DEBOUNCE_REG:
++ reg_offset = 0;
++ break;
++ default:
+ reg_offset = comm->pad_map[offset] * 16;
++ break;
++ }
+
+ return comm->reg_base + reg_offset + reg;
+ }
+@@ -1612,7 +1619,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
+ continue;
+ }
+
++ raw_spin_lock(&vg->lock);
+ pending = readl(reg);
++ raw_spin_unlock(&vg->lock);
+ for_each_set_bit(pin, &pending, 32) {
+ virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ generic_handle_irq(virq);
+diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
+index 7826c7f..9931be6 100644
+--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
+@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned int i;
+ int ret;
+
++ if (!mrfld_buf_available(mp, pin))
++ return -ENOTSUPP;
++
+ for (i = 0; i < nconfigs; i++) {
+ switch (pinconf_to_config_param(configs[i])) {
+ case PIN_CONFIG_BIAS_DISABLE:
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index e6a512e..a3ade9e 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
+ 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
+ BIT(3)),
+ AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
+- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
++ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
+ AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
+ AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
+diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
+index 113f3d6..27f75b1 100644
+--- a/drivers/staging/greybus/timesync_platform.c
++++ b/drivers/staging/greybus/timesync_platform.c
+@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
+
+ int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
+ {
++ if (!arche_platform_change_state_cb)
++ return 0;
++
+ return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
+ pdata);
+ }
+
+ void gb_timesync_platform_unlock_bus(void)
+ {
++ if (!arche_platform_change_state_cb)
++ return;
++
+ arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
+ }
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index d2e50a2..24f9f98 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* CBM - Flash disk */
+ { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+
++ /* WORLDE easy key (easykey.25) MIDI controller */
++ { USB_DEVICE(0x0218, 0x0401), .driver_info =
++ USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ /* HP 5300/5370C scanner */
+ { USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 17989b7..8d412d8 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
+ return -EINVAL;
+ length = le32_to_cpu(d->dwSize);
++ if (len < length)
++ return -EINVAL;
+ type = le32_to_cpu(d->dwPropertyDataType);
+ if (type < USB_EXT_PROP_UNICODE ||
+ type > USB_EXT_PROP_UNICODE_MULTI) {
+@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ return -EINVAL;
+ }
+ pnl = le16_to_cpu(d->wPropertyNameLength);
++ if (length < 14 + pnl) {
++ pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
++ length, pnl, type);
++ return -EINVAL;
++ }
+ pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
+ if (length != 14 + pnl + pdl) {
+ pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
+@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
+ }
+ }
+ if (flags & (1 << i)) {
++ if (len < 4) {
++ goto error;
++ }
+ os_descs_count = get_unaligned_le32(data);
+ data += 4;
+ len -= 4;
+@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+
+ ENTER();
+
+- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
++ if (unlikely(len < 16 ||
++ get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ get_unaligned_le32(data + 4) != len))
+ goto error;
+ str_count = get_unaligned_le32(data + 8);
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index c3e172e..338575f 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -578,11 +578,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
+ | MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
+- musb->need_finish_resume = 1;
+-
+ musb->xceiv->otg->state = OTG_STATE_A_HOST;
+ musb->is_active = 1;
+ musb_host_resume_root_hub(musb);
++ schedule_delayed_work(&musb->finish_resume_work,
++ msecs_to_jiffies(USB_RESUME_TIMEOUT));
+ break;
+ case OTG_STATE_B_WAIT_ACON:
+ musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
+@@ -2691,11 +2691,6 @@ static int musb_resume(struct device *dev)
+ mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
+ if ((devctl & mask) != (musb->context.devctl & mask))
+ musb->port1_status = 0;
+- if (musb->need_finish_resume) {
+- musb->need_finish_resume = 0;
+- schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(USB_RESUME_TIMEOUT));
+- }
+
+ /*
+ * The USB HUB code expects the device to be in RPM_ACTIVE once it came
+@@ -2747,12 +2742,6 @@ static int musb_runtime_resume(struct device *dev)
+
+ musb_restore_context(musb);
+
+- if (musb->need_finish_resume) {
+- musb->need_finish_resume = 0;
+- schedule_delayed_work(&musb->finish_resume_work,
+- msecs_to_jiffies(USB_RESUME_TIMEOUT));
+- }
+-
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index 47331db..854fbf7 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -410,7 +410,6 @@ struct musb {
+
+ /* is_suspended means USB B_PERIPHERAL suspend */
+ unsigned is_suspended:1;
+- unsigned need_finish_resume :1;
+
+ /* may_wakeup means remote wakeup is enabled */
+ unsigned may_wakeup:1;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7ce31a4..42cc72e 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
++ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 46fca6b..1db4b61 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
++ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+ { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index e3b7af8..09d9be8 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID 0x0557
+ #define ATEN_VENDOR_ID2 0x0547
+ #define ATEN_PRODUCT_ID 0x2008
++#define ATEN_PRODUCT_ID2 0x2118
+
+ #define IODATA_VENDOR_ID 0x04bb
+ #define IODATA_PRODUCT_ID 0x0a03
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 1bc6089..696458d 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
+ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
+ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
++ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
+ {USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
+ {USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
+ {USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index c6f2d89..64613fb 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+
+ static void vhost_init_is_le(struct vhost_virtqueue *vq)
+ {
+- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+- vq->is_le = true;
++ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
++ || virtio_legacy_is_little_endian();
+ }
+ #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
+ static void vhost_reset_is_le(struct vhost_virtqueue *vq)
+ {
+- vq->is_le = virtio_legacy_is_little_endian();
++ vhost_init_is_le(vq);
+ }
+
+ struct vhost_flush_struct {
+@@ -1713,10 +1713,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
+ int r;
+ bool is_le = vq->is_le;
+
+- if (!vq->private_data) {
+- vhost_reset_is_le(vq);
++ if (!vq->private_data)
+ return 0;
+- }
+
+ vhost_init_is_le(vq);
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index f136048..489bfc6 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
+ if (xen_domain())
+ return true;
+
+- /*
+- * On ARM-based machines, the DMA ops will do the right thing,
+- * so always use them with legacy devices.
+- */
+- if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+- return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+-
+ return false;
+ }
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 8f6a2a5..a27fc87 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
+ rc = -ENOMEM;
+ goto error_exit;
+ }
++ spin_lock_init(&cifsFile->file_info_lock);
+ file->private_data = cifsFile;
+ cifsFile->tlink = cifs_get_tlink(tlink);
+ tcon = tlink_tcon(tlink);
+diff --git a/fs/dax.c b/fs/dax.c
+index 014defd..bf6218d 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -1270,6 +1270,11 @@ iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct blk_dax_ctl dax = { 0 };
+ ssize_t map_len;
+
++ if (fatal_signal_pending(current)) {
++ ret = -EINTR;
++ break;
++ }
++
+ dax.sector = iomap->blkno +
+ (((pos & PAGE_MASK) - iomap->offset) >> 9);
+ dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 478630a..bbc316d 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3827,6 +3827,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
+ EXT4_DESC_PER_BLOCK(sb);
++ if (ext4_has_feature_meta_bg(sb)) {
++ if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
++ ext4_msg(sb, KERN_WARNING,
++ "first meta block group too large: %u "
++ "(group descriptor block count %u)",
++ le32_to_cpu(es->s_first_meta_bg), db_count);
++ goto failed_mount;
++ }
++ }
+ sbi->s_group_desc = ext4_kvmalloc(db_count *
+ sizeof(struct buffer_head *),
+ GFP_KERNEL);
+diff --git a/fs/iomap.c b/fs/iomap.c
+index a8ee8c3..814ae8f 100644
+--- a/fs/iomap.c
++++ b/fs/iomap.c
+@@ -113,6 +113,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
+
+ BUG_ON(pos + len > iomap->offset + iomap->length);
+
++ if (fatal_signal_pending(current))
++ return -EINTR;
++
+ page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index 42aace4..6481369 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
+ struct nfs4_layout_stateid *ls;
+ struct nfs4_stid *stp;
+
+- stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
++ stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
++ nfsd4_free_layout_stateid);
+ if (!stp)
+ return NULL;
+- stp->sc_free = nfsd4_free_layout_stateid;
++
+ get_nfs4_file(fp);
+ stp->sc_file = fp;
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 4b4beaa..a0dee8a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -633,8 +633,8 @@ find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
+ return co;
+ }
+
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+- struct kmem_cache *slab)
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++ void (*sc_free)(struct nfs4_stid *))
+ {
+ struct nfs4_stid *stid;
+ int new_id;
+@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ idr_preload_end();
+ if (new_id < 0)
+ goto out_free;
++
++ stid->sc_free = sc_free;
+ stid->sc_client = cl;
+ stid->sc_stateid.si_opaque.so_id = new_id;
+ stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
+@@ -675,15 +677,12 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
+ {
+ struct nfs4_stid *stid;
+- struct nfs4_ol_stateid *stp;
+
+- stid = nfs4_alloc_stid(clp, stateid_slab);
++ stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
+ if (!stid)
+ return NULL;
+
+- stp = openlockstateid(stid);
+- stp->st_stid.sc_free = nfs4_free_ol_stateid;
+- return stp;
++ return openlockstateid(stid);
+ }
+
+ static void nfs4_free_deleg(struct nfs4_stid *stid)
+@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
+ goto out_dec;
+ if (delegation_blocked(&current_fh->fh_handle))
+ goto out_dec;
+- dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
++ dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
+ if (dp == NULL)
+ goto out_dec;
+
+- dp->dl_stid.sc_free = nfs4_free_deleg;
+ /*
+ * delegation seqid's are never incremented. The 4.1 special
+ * meaning of seqid 0 isn't meaningful, really, but let's avoid
+@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+ get_nfs4_file(fp);
+ stp->st_stid.sc_file = fp;
+- stp->st_stid.sc_free = nfs4_free_lock_stateid;
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = open_stp->st_deny_bmap;
+ stp->st_openstp = open_stp;
+@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+ lst = find_lock_stateid(lo, fi);
+ if (lst == NULL) {
+ spin_unlock(&clp->cl_lock);
+- ns = nfs4_alloc_stid(clp, stateid_slab);
++ ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+ if (ns == NULL)
+ return NULL;
+
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index c939936..4516e8b 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
+ __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ stateid_t *stateid, unsigned char typemask,
+ struct nfs4_stid **s, struct nfsd_net *nn);
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+- struct kmem_cache *slab);
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++ void (*sc_free)(struct nfs4_stid *));
+ void nfs4_unhash_stid(struct nfs4_stid *s);
+ void nfs4_put_stid(struct nfs4_stid *s);
+ void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index e798755..39e3254 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -184,6 +184,7 @@ struct irq_data {
+ *
+ * IRQD_TRIGGER_MASK - Mask for the trigger type bits
+ * IRQD_SETAFFINITY_PENDING - Affinity setting is pending
++ * IRQD_ACTIVATED - Interrupt has already been activated
+ * IRQD_NO_BALANCING - Balancing disabled for this IRQ
+ * IRQD_PER_CPU - Interrupt is per cpu
+ * IRQD_AFFINITY_SET - Interrupt affinity was set
+@@ -202,6 +203,7 @@ struct irq_data {
+ enum {
+ IRQD_TRIGGER_MASK = 0xf,
+ IRQD_SETAFFINITY_PENDING = (1 << 8),
++ IRQD_ACTIVATED = (1 << 9),
+ IRQD_NO_BALANCING = (1 << 10),
+ IRQD_PER_CPU = (1 << 11),
+ IRQD_AFFINITY_SET = (1 << 12),
+@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
+ return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+ }
+
++static inline bool irqd_is_activated(struct irq_data *d)
++{
++ return __irqd_to_state(d) & IRQD_ACTIVATED;
++}
++
++static inline void irqd_set_activated(struct irq_data *d)
++{
++ __irqd_to_state(d) |= IRQD_ACTIVATED;
++}
++
++static inline void irqd_clr_activated(struct irq_data *d)
++{
++ __irqd_to_state(d) &= ~IRQD_ACTIVATED;
++}
++
+ #undef __irqd_to_state
+
+ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
+diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
+index c1784c0..134a2f6 100644
+--- a/include/linux/memory_hotplug.h
++++ b/include/linux/memory_hotplug.h
+@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
+ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
+ /* VM interface that may be used by firmware interface */
+ extern int online_pages(unsigned long, unsigned long, int);
+-extern int test_pages_in_a_zone(unsigned long, unsigned long);
++extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end);
+ extern void __offline_isolated_pages(unsigned long, unsigned long);
+
+ typedef void (*online_page_callback_t)(struct page *page);
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 1c7eec0..3a481a4 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret;
++ bool ret;
+
+ rcu_read_lock_sched();
+
+@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+ {
+ unsigned long __percpu *percpu_count;
+- int ret = false;
++ bool ret = false;
+
+ rcu_read_lock_sched();
+
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 85bc9be..4e2f3de 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -5219,6 +5219,11 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ return ERR_PTR(err);
+ }
+
++/*
++ * The returned cgroup is fully initialized including its control mask, but
++ * it isn't associated with its kernfs_node and doesn't have the control
++ * mask applied.
++ */
+ static struct cgroup *cgroup_create(struct cgroup *parent)
+ {
+ struct cgroup_root *root = parent->root;
+@@ -5283,11 +5288,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+
+ cgroup_propagate_control(cgrp);
+
+- /* @cgrp doesn't have dir yet so the following will only create csses */
+- ret = cgroup_apply_control_enable(cgrp);
+- if (ret)
+- goto out_destroy;
+-
+ return cgrp;
+
+ out_cancel_ref:
+@@ -5295,9 +5295,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
+ out_free_cgrp:
+ kfree(cgrp);
+ return ERR_PTR(ret);
+-out_destroy:
+- cgroup_destroy_locked(cgrp);
+- return ERR_PTR(ret);
+ }
+
+ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e5a8839..b1cfd74 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
+ static void
+ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
+ {
+-
+ lockdep_assert_held(&ctx->lock);
+
+ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
+ {
+ struct perf_event *group_leader = event->group_leader, *pos;
+
++ lockdep_assert_held(&event->ctx->lock);
++
+ /*
+ * We can have double attach due to group movement in perf_event_open.
+ */
+@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
+ struct perf_event *sibling, *tmp;
+ struct list_head *list = NULL;
+
++ lockdep_assert_held(&event->ctx->lock);
++
+ /*
+ * We can have double detach due to exit/hot-unplug + close.
+ */
+@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
+ */
+ static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
+ {
+- lockdep_assert_held(&event->ctx->mutex);
++ struct perf_event_context *ctx = event->ctx;
++
++ lockdep_assert_held(&ctx->mutex);
+
+ event_function_call(event, __perf_remove_from_context, (void *)flags);
++
++ /*
++ * The above event_function_call() can NO-OP when it hits
++ * TASK_TOMBSTONE. In that case we must already have been detached
++ * from the context (by perf_event_exit_event()) but the grouping
++ * might still be in-tact.
++ */
++ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
++ if ((flags & DETACH_GROUP) &&
++ (event->attach_state & PERF_ATTACH_GROUP)) {
++ /*
++ * Since in that case we cannot possibly be scheduled, simply
++ * detach now.
++ */
++ raw_spin_lock_irq(&ctx->lock);
++ perf_group_detach(event);
++ raw_spin_unlock_irq(&ctx->lock);
++ }
+ }
+
+ /*
+@@ -6583,6 +6606,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ char *buf = NULL;
+ char *name;
+
++ if (vma->vm_flags & VM_READ)
++ prot |= PROT_READ;
++ if (vma->vm_flags & VM_WRITE)
++ prot |= PROT_WRITE;
++ if (vma->vm_flags & VM_EXEC)
++ prot |= PROT_EXEC;
++
++ if (vma->vm_flags & VM_MAYSHARE)
++ flags = MAP_SHARED;
++ else
++ flags = MAP_PRIVATE;
++
++ if (vma->vm_flags & VM_DENYWRITE)
++ flags |= MAP_DENYWRITE;
++ if (vma->vm_flags & VM_MAYEXEC)
++ flags |= MAP_EXECUTABLE;
++ if (vma->vm_flags & VM_LOCKED)
++ flags |= MAP_LOCKED;
++ if (vma->vm_flags & VM_HUGETLB)
++ flags |= MAP_HUGETLB;
++
+ if (file) {
+ struct inode *inode;
+ dev_t dev;
+@@ -6609,27 +6653,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ maj = MAJOR(dev);
+ min = MINOR(dev);
+
+- if (vma->vm_flags & VM_READ)
+- prot |= PROT_READ;
+- if (vma->vm_flags & VM_WRITE)
+- prot |= PROT_WRITE;
+- if (vma->vm_flags & VM_EXEC)
+- prot |= PROT_EXEC;
+-
+- if (vma->vm_flags & VM_MAYSHARE)
+- flags = MAP_SHARED;
+- else
+- flags = MAP_PRIVATE;
+-
+- if (vma->vm_flags & VM_DENYWRITE)
+- flags |= MAP_DENYWRITE;
+- if (vma->vm_flags & VM_MAYEXEC)
+- flags |= MAP_EXECUTABLE;
+- if (vma->vm_flags & VM_LOCKED)
+- flags |= MAP_LOCKED;
+- if (vma->vm_flags & VM_HUGETLB)
+- flags |= MAP_HUGETLB;
+-
+ goto got_name;
+ } else {
+ if (vma->vm_ops && vma->vm_ops->name) {
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 8c0a0ae..b59e676 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
+ }
+ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
+
++static void __irq_domain_activate_irq(struct irq_data *irq_data)
++{
++ if (irq_data && irq_data->domain) {
++ struct irq_domain *domain = irq_data->domain;
++
++ if (irq_data->parent_data)
++ __irq_domain_activate_irq(irq_data->parent_data);
++ if (domain->ops->activate)
++ domain->ops->activate(domain, irq_data);
++ }
++}
++
++static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
++{
++ if (irq_data && irq_data->domain) {
++ struct irq_domain *domain = irq_data->domain;
++
++ if (domain->ops->deactivate)
++ domain->ops->deactivate(domain, irq_data);
++ if (irq_data->parent_data)
++ __irq_domain_deactivate_irq(irq_data->parent_data);
++ }
++}
++
+ /**
+ * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
+ * interrupt
+@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
+ */
+ void irq_domain_activate_irq(struct irq_data *irq_data)
+ {
+- if (irq_data && irq_data->domain) {
+- struct irq_domain *domain = irq_data->domain;
+-
+- if (irq_data->parent_data)
+- irq_domain_activate_irq(irq_data->parent_data);
+- if (domain->ops->activate)
+- domain->ops->activate(domain, irq_data);
++ if (!irqd_is_activated(irq_data)) {
++ __irq_domain_activate_irq(irq_data);
++ irqd_set_activated(irq_data);
+ }
+ }
+
+@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
+ */
+ void irq_domain_deactivate_irq(struct irq_data *irq_data)
+ {
+- if (irq_data && irq_data->domain) {
+- struct irq_domain *domain = irq_data->domain;
+-
+- if (domain->ops->deactivate)
+- domain->ops->deactivate(domain, irq_data);
+- if (irq_data->parent_data)
+- irq_domain_deactivate_irq(irq_data->parent_data);
++ if (irqd_is_activated(irq_data)) {
++ __irq_domain_deactivate_irq(irq_data);
++ irqd_clr_activated(irq_data);
+ }
+ }
+
+diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
+index b97286c..f00b013 100644
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -266,7 +266,7 @@ static int get_sample(void)
+ static struct cpumask save_cpumask;
+ static bool disable_migrate;
+
+-static void move_to_next_cpu(void)
++static void move_to_next_cpu(bool initmask)
+ {
+ static struct cpumask *current_mask;
+ int next_cpu;
+@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
+ return;
+
+ /* Just pick the first CPU on first iteration */
+- if (!current_mask) {
++ if (initmask) {
+ current_mask = &save_cpumask;
+ get_online_cpus();
+ cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
+@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
+ static int kthread_fn(void *data)
+ {
+ u64 interval;
++ bool initmask = true;
+
+ while (!kthread_should_stop()) {
+
+- move_to_next_cpu();
++ move_to_next_cpu(initmask);
++ initmask = false;
+
+ local_irq_disable();
+ get_sample();
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 7798010..d8d7df8 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1703,6 +1703,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+
+ cond_resched();
+ find_page:
++ if (fatal_signal_pending(current)) {
++ error = -EINTR;
++ goto out;
++ }
++
+ page = find_get_page(mapping, index);
+ if (!page) {
+ page_cache_sync_readahead(mapping,
+diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
+index c3a8141..ede13734 100644
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
+ }
+
+ /*
+- * Confirm all pages in a range [start, end) is belongs to the same zone.
++ * Confirm all pages in a range [start, end) belong to the same zone.
++ * When true, return its valid [start, end).
+ */
+-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
++int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned long *valid_start, unsigned long *valid_end)
+ {
+ unsigned long pfn, sec_end_pfn;
++ unsigned long start, end;
+ struct zone *zone = NULL;
+ struct page *page;
+ int i;
+- for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
++ for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
+ pfn < end_pfn;
+- pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
++ pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
+ /* Make sure the memory section is present first */
+ if (!present_section_nr(pfn_to_section_nr(pfn)))
+ continue;
+@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+ page = pfn_to_page(pfn + i);
+ if (zone && page_zone(page) != zone)
+ return 0;
++ if (!zone)
++ start = pfn + i;
+ zone = page_zone(page);
++ end = pfn + MAX_ORDER_NR_PAGES;
+ }
+ }
+- return 1;
++
++ if (zone) {
++ *valid_start = start;
++ *valid_end = end;
++ return 1;
++ } else {
++ return 0;
++ }
+ }
+
+ /*
+@@ -1859,6 +1872,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
+ long offlined_pages;
+ int ret, drain, retry_max, node;
+ unsigned long flags;
++ unsigned long valid_start, valid_end;
+ struct zone *zone;
+ struct memory_notify arg;
+
+@@ -1869,10 +1883,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
+ return -EINVAL;
+ /* This makes hotplug much easier...and readable.
+ we assume this for now. .*/
+- if (!test_pages_in_a_zone(start_pfn, end_pfn))
++ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
+ return -EINVAL;
+
+- zone = page_zone(pfn_to_page(start_pfn));
++ zone = page_zone(pfn_to_page(valid_start));
+ node = zone_to_nid(zone);
+ nr_pages = end_pfn - start_pfn;
+
+diff --git a/mm/zswap.c b/mm/zswap.c
+index 275b22c..dbef278 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
+
+ /* Enable/disable zswap (disabled by default) */
+ static bool zswap_enabled;
+-module_param_named(enabled, zswap_enabled, bool, 0644);
++static int zswap_enabled_param_set(const char *,
++ const struct kernel_param *);
++static struct kernel_param_ops zswap_enabled_param_ops = {
++ .set = zswap_enabled_param_set,
++ .get = param_get_bool,
++};
++module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
+
+ /* Crypto compressor to use */
+ #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
+@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+ /* used by param callback function */
+ static bool zswap_init_started;
+
++/* fatal error during init */
++static bool zswap_init_failed;
++
+ /*********************************
+ * helpers and fwd declarations
+ **********************************/
+@@ -706,6 +715,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
+ char *s = strstrip((char *)val);
+ int ret;
+
++ if (zswap_init_failed) {
++ pr_err("can't set param, initialization failed\n");
++ return -ENODEV;
++ }
++
+ /* no change required */
+ if (!strcmp(s, *(char **)kp->arg))
+ return 0;
+@@ -785,6 +799,17 @@ static int zswap_zpool_param_set(const char *val,
+ return __zswap_param_set(val, kp, NULL, zswap_compressor);
+ }
+
++static int zswap_enabled_param_set(const char *val,
++ const struct kernel_param *kp)
++{
++ if (zswap_init_failed) {
++ pr_err("can't enable, initialization failed\n");
++ return -ENODEV;
++ }
++
++ return param_set_bool(val, kp);
++}
++
+ /*********************************
+ * writeback code
+ **********************************/
+@@ -1271,6 +1296,9 @@ static int __init init_zswap(void)
+ dstmem_fail:
+ zswap_entry_cache_destroy();
+ cache_fail:
++ /* if built-in, we aren't unloaded on failure; don't allow use */
++ zswap_init_failed = true;
++ zswap_enabled = false;
+ return -ENOMEM;
+ }
+ /* must be late so crypto has time to come up */
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 436a753..5e9ed5e 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
+
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+- hrtimer_cancel(&op->timer);
+- hrtimer_cancel(&op->thrtimer);
+-
+- if (op->tsklet.func)
+- tasklet_kill(&op->tsklet);
++ if (op->tsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++ hrtimer_active(&op->timer)) {
++ hrtimer_cancel(&op->timer);
++ tasklet_kill(&op->tsklet);
++ }
++ }
+
+- if (op->thrtsklet.func)
+- tasklet_kill(&op->thrtsklet);
++ if (op->thrtsklet.func) {
++ while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++ test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++ hrtimer_active(&op->thrtimer)) {
++ hrtimer_cancel(&op->thrtimer);
++ tasklet_kill(&op->thrtsklet);
++ }
++ }
+
+ if ((op->frames) && (op->frames != &op->sframe))
+ kfree(op->frames);
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index dc6fb79..25d9a9c 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ if (!oa->data)
+ return -ENOMEM;
+
+- creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
++ creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ if (!creds) {
+ kfree(oa->data);
+ return -ENOMEM;
diff --git a/4.8.17/4420_grsecurity-3.1-4.8.17-201701151620.patch b/4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch
index 147c250..32e3834 100644
--- a/4.8.17/4420_grsecurity-3.1-4.8.17-201701151620.patch
+++ b/4.9.9/4420_grsecurity-3.1-4.9.9-201702122044.patch
@@ -232,7 +232,7 @@ index 5385cba..607c6a0 100644
+zconf.lex.c
zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
-index 385a5ef..51d7fba 100644
+index 9b9c479..5a635ff 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
@@ -251,7 +251,7 @@ index 385a5ef..51d7fba 100644
=== 5 Kbuild clean infrastructure
-@@ -644,7 +645,29 @@ Both possibilities are described in the following.
+@@ -645,7 +646,29 @@ Both possibilities are described in the following.
Finally, the two .o files are linked to the executable, lxdialog.
Note: The syntax <executable>-y is not permitted for host-programs.
@@ -282,7 +282,7 @@ index 385a5ef..51d7fba 100644
kbuild offers support for host programs written in C++. This was
introduced solely to support kconfig, and is not recommended
-@@ -667,7 +690,7 @@ Both possibilities are described in the following.
+@@ -668,7 +691,7 @@ Both possibilities are described in the following.
qconf-cxxobjs := qconf.o
qconf-objs := check.o
@@ -291,7 +291,7 @@ index 385a5ef..51d7fba 100644
When compiling host programs, it is possible to set specific flags.
The programs will always be compiled utilising $(HOSTCC) passed
-@@ -695,7 +718,7 @@ Both possibilities are described in the following.
+@@ -696,7 +719,7 @@ Both possibilities are described in the following.
When linking qconf, it will be passed the extra option
"-L$(QTDIR)/lib".
@@ -300,7 +300,7 @@ index 385a5ef..51d7fba 100644
Kbuild will only build host-programs when they are referenced
as a prerequisite.
-@@ -726,7 +749,7 @@ Both possibilities are described in the following.
+@@ -727,7 +750,7 @@ Both possibilities are described in the following.
This will tell kbuild to build lxdialog even if not referenced in
any rule.
@@ -310,10 +310,10 @@ index 385a5ef..51d7fba 100644
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index 46726d4..36138ff 100644
+index 922dec8..a45d4a2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1368,6 +1368,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1422,6 +1422,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
[KNL] Should the hard-lockup detector generate
backtraces on all cpus.
Format: <integer>
@@ -326,7 +326,7 @@ index 46726d4..36138ff 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
-@@ -2591,6 +2597,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2651,6 +2657,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
@@ -337,7 +337,7 @@ index 46726d4..36138ff 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2895,6 +2905,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2959,6 +2969,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -406,8 +406,20 @@ index ffab8b5..b8fcd61 100644
modules_disabled:
A toggle value indicating if modules are allowed to be loaded
+diff --git a/Kbuild b/Kbuild
+index 3d0ae15..84e5412 100644
+--- a/Kbuild
++++ b/Kbuild
+@@ -91,6 +91,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
+ always += missing-syscalls
+ targets += missing-syscalls
+
++GCC_PLUGINS_missing-syscalls := n
+ quiet_cmd_syscalls = CALL $<
+ cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
+
diff --git a/Makefile b/Makefile
-index ace32d3..f21d750 100644
+index c0c41c9..630adc4 100644
--- a/Makefile
+++ b/Makefile
@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -421,7 +433,7 @@ index ace32d3..f21d750 100644
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
-@@ -717,7 +719,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+@@ -731,7 +733,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
@@ -430,7 +442,7 @@ index ace32d3..f21d750 100644
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -892,7 +894,7 @@ export mod_sign_cmd
+@@ -910,7 +912,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -439,7 +451,7 @@ index ace32d3..f21d750 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -1258,7 +1260,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1274,7 +1276,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.pem signing_key.priv signing_key.x509 \
x509.genkey extra_certificates signing_key.x509.keyid \
@@ -451,7 +463,7 @@ index ace32d3..f21d750 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1297,7 +1302,7 @@ distclean: mrproper
+@@ -1314,7 +1319,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -461,10 +473,18 @@ index ace32d3..f21d750 100644
diff --git a/arch/Kconfig b/arch/Kconfig
-index fd6e971..35d7bbf 100644
+index 659bdd0..4179181 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -355,7 +355,7 @@ config HAVE_GCC_PLUGINS
+@@ -164,6 +164,7 @@ config ARCH_USE_BUILTIN_BSWAP
+ config KRETPROBES
+ def_bool y
+ depends on KPROBES && HAVE_KRETPROBES
++ depends on !PAX_RAP
+
+ config USER_RETURN_NOTIFIER
+ bool
+@@ -355,7 +356,7 @@ config HAVE_GCC_PLUGINS
menuconfig GCC_PLUGINS
bool "GCC plugins"
depends on HAVE_GCC_PLUGINS
@@ -473,6 +493,14 @@ index fd6e971..35d7bbf 100644
help
GCC plugins are loadable modules that provide extra features to the
compiler. They are useful for runtime instrumentation and static analysis.
+@@ -759,6 +760,7 @@ config VMAP_STACK
+ default y
+ bool "Use a virtually-mapped stack"
+ depends on HAVE_ARCH_VMAP_STACK && !KASAN
++ depends on !GRKERNSEC_KSTACKOVERFLOW
+ ---help---
+ Enable this if you want the use virtually-mapped kernel stacks
+ with guard pages. This causes kernel stack overflows to be
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 498933a..78d2b22 100644
--- a/arch/alpha/include/asm/atomic.h
@@ -772,7 +800,7 @@ index 83e9eee..db02682 100644
/*
* This routine handles page faults. It determines the address,
-@@ -132,8 +250,29 @@ retry:
+@@ -132,8 +250,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
good_area:
si_code = SEGV_ACCERR;
if (cause < 0) {
@@ -803,23 +831,47 @@ index 83e9eee..db02682 100644
} else if (!cause) {
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
-diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
-index 0d3e59f..4418d65 100644
---- a/arch/arc/Kconfig
-+++ b/arch/arc/Kconfig
-@@ -541,6 +541,7 @@ config ARC_DBG_TLB_MISS_COUNT
- bool "Profile TLB Misses"
- default n
- select DEBUG_FS
-+ depends on !GRKERNSEC_KMEM
- help
- Counts number of I and D TLB Misses and exports them via Debugfs
- The counters can be cleared via Debugfs as well
+diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
+index 42b0504..6013221 100644
+--- a/arch/arc/kernel/kprobes.c
++++ b/arch/arc/kernel/kprobes.c
+@@ -424,6 +424,7 @@ static void __used kretprobe_trampoline_holder(void)
+ "kretprobe_trampoline:\n" "nop\n");
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -433,6 +434,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+@@ -509,6 +511,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+@@ -516,6 +519,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+ {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index a9c4e48..75bc9c9 100644
+index b5d529f..0bb4d4f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1621,6 +1621,7 @@ config AEABI
+@@ -1622,6 +1622,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
depends on AEABI && !THUMB2_KERNEL
@@ -827,7 +879,7 @@ index a9c4e48..75bc9c9 100644
help
This option preserves the old syscall interface along with the
new (ARM EABI) one. It also provides a compatibility layer to
-@@ -1689,6 +1690,7 @@ config HIGHPTE
+@@ -1690,6 +1691,7 @@ config HIGHPTE
config CPU_SW_DOMAIN_PAN
bool "Enable use of CPU domains to implement privileged no-access"
depends on MMU && !ARM_LPAE
@@ -835,7 +887,7 @@ index a9c4e48..75bc9c9 100644
default y
help
Increase kernel security by ensuring that normal kernel accesses
-@@ -1765,7 +1767,7 @@ config ALIGNMENT_TRAP
+@@ -1766,7 +1768,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -844,7 +896,7 @@ index a9c4e48..75bc9c9 100644
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
-@@ -2020,6 +2022,7 @@ config KEXEC
+@@ -2021,6 +2023,7 @@ config KEXEC
depends on (!SMP || PM_SLEEP_SMP)
depends on !CPU_V7M
select KEXEC_CORE
@@ -852,7 +904,7 @@ index a9c4e48..75bc9c9 100644
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -2064,7 +2067,7 @@ config EFI_STUB
+@@ -2065,7 +2068,7 @@ config EFI_STUB
config EFI
bool "UEFI runtime support"
@@ -862,7 +914,7 @@ index a9c4e48..75bc9c9 100644
select EFI_PARAMS_FROM_FDT
select EFI_STUB
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
-index a9693b6..87d8936 100644
+index d83f7c3..a6aba4c 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -7,6 +7,7 @@ config ARM_PTDUMP
@@ -874,18 +926,18 @@ index a9693b6..87d8936 100644
Say Y here if you want to show the kernel pagetable layout in a
debugfs file. This information is only useful for kernel developers
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
-index d50430c..01cc53b 100644
+index d50430c..39509a6 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
-@@ -103,6 +103,8 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
- KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
- endif
+@@ -24,6 +24,8 @@ endif
+
+ GCOV_PROFILE := n
-+KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
++GCC_PLUGINS := n
+
- # -fstack-protector-strong triggers protection checks in this code,
- # but it is being used too early to link to meaningful stack_chk logic.
- nossp_flags := $(call cc-option, -fno-stack-protector)
+ #
+ # Architecture dependencies
+ #
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 6fc73bf..d0af3c7b 100644
--- a/arch/arm/crypto/sha1_glue.c
@@ -1688,7 +1740,7 @@ index 75fe66b..2255c86 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index 9156fc3..0521e3e 100644
+index bdd283b..e66fb83 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
@@ -2307,7 +2359,7 @@ index 5f833f7..76e6644 100644
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index a93c0f9..5c31bbb 100644
+index 1f59ea05..81245f0 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -18,6 +18,7 @@
@@ -2469,17 +2521,18 @@ index a93c0f9..5c31bbb 100644
__clear_user_std(void __user *addr, unsigned long n);
static inline unsigned long __must_check
-@@ -533,6 +565,9 @@ __clear_user(void __user *addr, unsigned long n)
-
+@@ -534,6 +566,10 @@ __clear_user(void __user *addr, unsigned long n)
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
++
+ if ((long)n < 0)
+ return n;
+
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
-@@ -542,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+@@ -543,6 +579,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -2515,19 +2568,6 @@ index 7e45f69..2c047db 100644
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);
-diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
-index 7dccc96..84da243 100644
---- a/arch/arm/kernel/cpuidle.c
-+++ b/arch/arm/kernel/cpuidle.c
-@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
- static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
- __used __section(__cpuidle_method_of_table_end);
-
--static struct cpuidle_ops cpuidle_ops[NR_CPUS];
-+static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
-
- /**
- * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
index 9f43ba0..1cee475 100644
--- a/arch/arm/kernel/efi.c
@@ -2916,29 +2956,6 @@ index 059c3da..8e45cfc 100644
if (!cache_is_vipt_nonaliasing())
flush_icache_range((unsigned long)base + offset, offset +
length);
-diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
-index 0c7efc3..3927085 100644
---- a/arch/arm/kernel/module-plts.c
-+++ b/arch/arm/kernel/module-plts.c
-@@ -30,17 +30,12 @@ struct plt_entries {
- u32 lit[PLT_ENT_COUNT];
- };
-
--static bool in_init(const struct module *mod, u32 addr)
--{
-- return addr - (u32)mod->init_layout.base < mod->init_layout.size;
--}
--
- u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
- {
- struct plt_entries *plt, *plt_end;
- int c, *count;
-
-- if (in_init(mod, loc)) {
-+ if (within_module_init(loc, mod)) {
- plt = (void *)mod->arch.init_plt->sh_addr;
- plt_end = (void *)plt + mod->arch.init_plt->sh_size;
- count = &mod->arch.init_plt_count;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 4f14b5c..91ff261 100644
--- a/arch/arm/kernel/module.c
@@ -3016,7 +3033,7 @@ index 69bda1a..755113a 100644
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
-index 612eb53..5a44c8c 100644
+index 91d2d5b..042c26e 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -118,8 +118,8 @@ void __show_regs(struct pt_regs *regs)
@@ -3039,7 +3056,7 @@ index 612eb53..5a44c8c 100644
/*
* Copy the initial value of the domain access control register
* from the current thread: thread->addr_limit will have been
-@@ -337,7 +337,7 @@ static struct vm_area_struct gate_vma = {
+@@ -336,7 +336,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
@@ -3048,7 +3065,7 @@ index 612eb53..5a44c8c 100644
return 0;
}
arch_initcall(gate_vma_init);
-@@ -366,92 +366,14 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+@@ -365,92 +365,14 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return is_gate_vma(vma) ? "[vectors]" : NULL;
}
@@ -3180,10 +3197,10 @@ index 3fa867a..d610607 100644
/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index df7f2a7..d9d2bc1 100644
+index 34e3f3c..3d2dada 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
-@@ -112,21 +112,23 @@ EXPORT_SYMBOL(elf_hwcap);
+@@ -112,6 +112,8 @@ EXPORT_SYMBOL(elf_hwcap);
unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
@@ -3191,27 +3208,7 @@ index df7f2a7..d9d2bc1 100644
+pmdval_t __supported_pmd_mask __read_only;
#ifdef MULTI_CPU
--struct processor processor __read_mostly;
-+struct processor processor __read_only;
- #endif
- #ifdef MULTI_TLB
--struct cpu_tlb_fns cpu_tlb __read_mostly;
-+struct cpu_tlb_fns cpu_tlb __read_only;
- #endif
- #ifdef MULTI_USER
--struct cpu_user_fns cpu_user __read_mostly;
-+struct cpu_user_fns cpu_user __read_only;
- #endif
- #ifdef MULTI_CACHE
--struct cpu_cache_fns cpu_cache __read_mostly;
-+struct cpu_cache_fns cpu_cache __read_only;
- #endif
- #ifdef CONFIG_OUTER_CACHE
--struct outer_cache_fns outer_cache __read_mostly;
-+struct outer_cache_fns outer_cache __read_only;
- EXPORT_SYMBOL(outer_cache);
- #endif
-
+ struct processor processor __ro_after_init;
@@ -257,9 +259,13 @@ static int __get_cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
@@ -3285,19 +3282,6 @@ index 7b8f214..ece8e28 100644
-
- return page;
-}
-diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index 8615216..f5be307 100644
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -82,7 +82,7 @@ enum ipi_msg_type {
-
- static DECLARE_COMPLETION(cpu_running);
-
--static struct smp_operations smp_ops;
-+static struct smp_operations smp_ops __read_only;
-
- void __init smp_set_ops(const struct smp_operations *ops)
- {
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index b10e136..cb5edf9 100644
--- a/arch/arm/kernel/tcm.c
@@ -3311,7 +3295,7 @@ index b10e136..cb5edf9 100644
}
};
-@@ -362,7 +362,9 @@ no_dtcm:
+@@ -362,7 +362,9 @@ void __init tcm_init(void)
start = &__sitcm_text;
end = &__eitcm_text;
ram = &__itcm_start;
@@ -3322,7 +3306,7 @@ index b10e136..cb5edf9 100644
start, end);
itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index bc69838..e5dfdd4 100644
+index 9688ec0..dd072c0 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3334,7 +3318,7 @@ index bc69838..e5dfdd4 100644
#else
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
-@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+@@ -287,6 +287,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
@@ -3343,7 +3327,7 @@ index bc69838..e5dfdd4 100644
static unsigned long oops_begin(void)
{
int cpu;
-@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+@@ -329,6 +331,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
@@ -3354,7 +3338,7 @@ index bc69838..e5dfdd4 100644
do_exit(signr);
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
-index d24e5dd..77cf6cf 100644
+index f7f55df..49c9f9e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -44,7 +44,8 @@
@@ -3368,7 +3352,7 @@ index d24e5dd..77cf6cf 100644
#define ARM_EXIT_DISCARD(x)
#else
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index c94b90d..0cc6830 100644
+index 19b5f5c..9aa8e58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
@@ -3380,7 +3364,7 @@ index c94b90d..0cc6830 100644
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-@@ -388,7 +388,7 @@ void force_vm_exit(const cpumask_t *mask)
+@@ -423,7 +423,7 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
@@ -3389,7 +3373,7 @@ index c94b90d..0cc6830 100644
}
/**
-@@ -421,7 +421,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -456,7 +456,7 @@ static void update_vttbr(struct kvm *kvm)
/* First user of a new VMID generation? */
if (unlikely(kvm_next_vmid == 0)) {
@@ -3398,7 +3382,7 @@ index c94b90d..0cc6830 100644
kvm_next_vmid = 1;
/*
-@@ -438,7 +438,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -473,7 +473,7 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
@@ -3434,19 +3418,6 @@ index 1712f13..a3165dc 100644
#include "csumpartialcopygeneric.S"
-diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
-index 8044591..c9b2609 100644
---- a/arch/arm/lib/delay.c
-+++ b/arch/arm/lib/delay.c
-@@ -29,7 +29,7 @@
- /*
- * Default to the loop-based delay implementation.
- */
--struct arm_delay_ops arm_delay_ops = {
-+struct arm_delay_ops arm_delay_ops __read_only = {
- .delay = __loop_delay,
- .const_udelay = __loop_const_udelay,
- .udelay = __loop_udelay,
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 6bd1089..e999400 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
@@ -3520,7 +3491,7 @@ diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index ae2a018..297ad08 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
-@@ -156,7 +156,7 @@ exit:
+@@ -156,7 +156,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
/*
* This ioremap hook is used on Armada 375/38x to ensure that all MMIO
@@ -3561,10 +3532,10 @@ index f39bd51..866c780 100644
extern void armada_370_xp_cpu_resume(void);
extern void armada_38x_cpu_resume(void);
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
-index b6443a4..20a0b74 100644
+index 6b6fda6..232df1e 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
-@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
}
#endif
@@ -3574,10 +3545,10 @@ index b6443a4..20a0b74 100644
};
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-index ad98246..69437a8 100644
+index 7d62ad4..97774b1 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
-@@ -88,7 +88,7 @@ struct cpu_pm_ops {
+@@ -89,7 +89,7 @@ struct cpu_pm_ops {
void (*resume)(void);
void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
void (*hotplug_restart)(void);
@@ -3586,7 +3557,7 @@ index ad98246..69437a8 100644
static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
-@@ -106,7 +106,7 @@ static void dummy_cpu_resume(void)
+@@ -107,7 +107,7 @@ static void dummy_cpu_resume(void)
static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}
@@ -3773,7 +3744,7 @@ index a69b22d..8523a03 100644
#include <linux/irq.h>
#include <linux/kernel.h>
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
-index 8538910..2f39bc4 100644
+index a970e7f..6f2bf9a 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -10,6 +10,7 @@
@@ -3797,10 +3768,10 @@ index 7cd9865..a00b6ab 100644
#include "common.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index d15a7fe..6cc4fc9 100644
+index c1799dd..9111dcc 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
-@@ -445,6 +445,7 @@ config CPU_32v5
+@@ -446,6 +446,7 @@ config CPU_32v5
config CPU_32v6
bool
@@ -3808,7 +3779,7 @@ index d15a7fe..6cc4fc9 100644
select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v6K
-@@ -599,6 +600,7 @@ config CPU_CP15_MPU
+@@ -603,6 +604,7 @@ config CPU_CP15_MPU
config CPU_USE_DOMAINS
bool
@@ -3816,7 +3787,7 @@ index d15a7fe..6cc4fc9 100644
help
This option enables or disables the use of domain switching
via the set_fs() function.
-@@ -809,7 +811,7 @@ config NEED_KUSER_HELPERS
+@@ -813,7 +815,7 @@ config NEED_KUSER_HELPERS
config KUSER_HELPERS
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
@@ -3825,7 +3796,7 @@ index d15a7fe..6cc4fc9 100644
default y
help
Warning: disabling this option may break user programs.
-@@ -823,7 +825,7 @@ config KUSER_HELPERS
+@@ -827,7 +829,7 @@ config KUSER_HELPERS
See Documentation/arm/kernel_user_helpers.txt for details.
However, the fixed address nature of these helpers can be used
@@ -3834,7 +3805,7 @@ index d15a7fe..6cc4fc9 100644
exploits.
If all of the binaries and libraries which run on your platform
-@@ -838,7 +840,7 @@ config KUSER_HELPERS
+@@ -842,7 +844,7 @@ config KUSER_HELPERS
config VDSO
bool "Enable VDSO for acceleration of some system calls"
@@ -3906,7 +3877,7 @@ index 7d5f4c7..c6a0816 100644
ai_sys += 1;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
-index cc12905..88463b3 100644
+index d1870c7..36d500f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -44,7 +44,7 @@ struct l2c_init_data {
@@ -4201,7 +4172,7 @@ index 3a2e678..ebdbf80 100644
inf->name, ifsr, addr);
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
-index 05ec5e0..0b70277 100644
+index 67532f2..10b646e 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -3,6 +3,7 @@
@@ -4228,8 +4199,8 @@ index 05ec5e0..0b70277 100644
+}
+
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
- unsigned long search_exception_table(unsigned long addr);
void early_abt_enable(void);
+
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 370581a..b985cc1 100644
--- a/arch/arm/mm/init.c
@@ -4416,14 +4387,13 @@ index 66353ca..8aad9f8 100644
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
-index 30fe03f..738d54e 100644
+index 4001dd1..c6dce7b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
-@@ -243,7 +243,15 @@ __setup("noalign", noalign_setup);
+@@ -243,6 +243,14 @@ __setup("noalign", noalign_setup);
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
--static struct mem_type mem_types[] = {
+#ifdef CONFIG_PAX_KERNEXEC
+#define L_PTE_KERNEXEC L_PTE_RDONLY
+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
@@ -4432,11 +4402,10 @@ index 30fe03f..738d54e 100644
+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
+#endif
+
-+static struct mem_type mem_types[] __read_only = {
+ static struct mem_type mem_types[] __ro_after_init = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
- L_PTE_SHARED,
-@@ -272,19 +280,19 @@ static struct mem_type mem_types[] = {
+@@ -272,19 +280,19 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
@@ -4461,7 +4430,7 @@ index 30fe03f..738d54e 100644
.domain = DOMAIN_KERNEL,
},
#endif
-@@ -300,7 +308,7 @@ static struct mem_type mem_types[] = {
+@@ -300,7 +308,7 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_VECTORS,
},
@@ -4470,7 +4439,7 @@ index 30fe03f..738d54e 100644
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
-@@ -313,17 +321,30 @@ static struct mem_type mem_types[] = {
+@@ -313,17 +321,30 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
@@ -4504,7 +4473,7 @@ index 30fe03f..738d54e 100644
[MT_MEMORY_RW_DTCM] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_XN,
-@@ -331,9 +352,10 @@ static struct mem_type mem_types[] = {
+@@ -331,9 +352,10 @@ static struct mem_type mem_types[] __ro_after_init = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
@@ -4868,7 +4837,7 @@ index 93d0b6d..2db6d99 100644
emit_mov_i(r_off, k, ctx);
load_common:
ctx->seen |= SEEN_DATA | SEEN_CALL;
-@@ -568,18 +558,6 @@ load_common:
+@@ -568,18 +558,6 @@ static int build_body(struct jit_ctx *ctx)
condt = ARM_COND_HI;
}
@@ -4913,11 +4882,44 @@ index a5bc92d..0bb4730 100644
omap_sram_size - omap_sram_skip);
+ pax_close_kernel();
}
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index a4ec240..96faf9b 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -485,6 +485,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -493,6 +494,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ {
+@@ -605,10 +607,12 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ return 0;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ #ifdef CONFIG_THUMB2_KERNEL
+
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
-index bc3f00f..88ded6a 100644
+index 969ef88..305b856 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -891,6 +891,7 @@ config RELOCATABLE
+@@ -896,6 +896,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
@@ -4926,7 +4928,7 @@ index bc3f00f..88ded6a 100644
select RELOCATABLE
help
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
-index 0cc758c..de67415 100644
+index b661fe7..6d124fc 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,6 +6,7 @@ config ARM64_PTDUMP
@@ -5100,7 +5102,7 @@ index d25f4f1..61d52da 100644
static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
{
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
-index e20bd43..7e476da 100644
+index ffbb9a5..d8b49ff 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -23,6 +23,9 @@
@@ -5113,7 +5115,7 @@ index e20bd43..7e476da 100644
/*
* VMALLOC range.
*
-@@ -718,6 +721,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+@@ -728,6 +731,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
#define kc_vaddr_to_offset(v) ((v) & ~VA_START)
#define kc_offset_to_vaddr(o) ((o) | VA_START)
@@ -5181,7 +5183,7 @@ index 2eb714c..3a10471 100644
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
-index db84983..d256a3edc 100644
+index 55d0adb..b986918 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -110,6 +110,7 @@ static inline void set_fs(mm_segment_t fs)
@@ -5202,17 +5204,18 @@ index db84983..d256a3edc 100644
kasan_check_read(from, n);
check_object_size(from, n, true);
return __arch_copy_to_user(to, from, n);
-@@ -286,6 +290,9 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
-
+@@ -287,6 +291,10 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
++
+ if ((long)n < 0)
+ return n;
+
kasan_check_write(to, n);
if (access_ok(VERIFY_READ, from, n)) {
-@@ -298,6 +305,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+@@ -300,6 +308,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -5223,10 +5226,10 @@ index db84983..d256a3edc 100644
if (access_ok(VERIFY_WRITE, to, n)) {
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
-index 65d81f9..6a46f09 100644
+index d55a7b0..d8dbd8a 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
-@@ -166,7 +166,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
+@@ -198,7 +198,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
static int create_safe_exec_page(void *src_start, size_t length,
unsigned long dst_addr,
phys_addr_t *phys_dst_addr,
@@ -5235,7 +5238,7 @@ index 65d81f9..6a46f09 100644
gfp_t mask)
{
int rc = 0;
-@@ -174,7 +174,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+@@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -5244,7 +5247,7 @@ index 65d81f9..6a46f09 100644
if (!dst) {
rc = -ENOMEM;
-@@ -184,9 +184,9 @@ static int create_safe_exec_page(void *src_start, size_t length,
+@@ -216,9 +216,9 @@ static int create_safe_exec_page(void *src_start, size_t length,
memcpy((void *)dst, src_start, length);
flush_icache_range(dst, dst + length);
@@ -5256,7 +5259,7 @@ index 65d81f9..6a46f09 100644
if (!pud) {
rc = -ENOMEM;
goto out;
-@@ -196,7 +196,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+@@ -228,7 +228,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
pud = pud_offset(pgd, dst_addr);
if (pud_none(*pud)) {
@@ -5265,7 +5268,7 @@ index 65d81f9..6a46f09 100644
if (!pmd) {
rc = -ENOMEM;
goto out;
-@@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+@@ -238,7 +238,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
pmd = pmd_offset(pud, dst_addr);
if (pmd_none(*pmd)) {
@@ -5274,7 +5277,7 @@ index 65d81f9..6a46f09 100644
if (!pte) {
rc = -ENOMEM;
goto out;
-@@ -449,7 +449,7 @@ int swsusp_arch_resume(void)
+@@ -510,7 +510,7 @@ int swsusp_arch_resume(void)
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
(unsigned long)hibernate_exit,
&phys_hibernate_exit,
@@ -5283,25 +5286,28 @@ index 65d81f9..6a46f09 100644
if (rc) {
pr_err("Failed to create safe executable page for hibernate_exit code.");
goto out;
-diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
-index 37e47a9..f8597fc 100644
---- a/arch/arm64/kernel/probes/decode-insn.c
-+++ b/arch/arm64/kernel/probes/decode-insn.c
-@@ -157,10 +157,10 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
- mod = __module_address((unsigned long)addr);
- if (mod && within_module_init((unsigned long)addr, mod) &&
- !within_module_init((unsigned long)scan_end, mod))
-- scan_end = (kprobe_opcode_t *)mod->init_layout.base;
-+ scan_end = (kprobe_opcode_t *)mod->init_layout.base_rx;
- else if (mod && within_module_core((unsigned long)addr, mod) &&
- !within_module_core((unsigned long)scan_end, mod))
-- scan_end = (kprobe_opcode_t *)mod->core_layout.base;
-+ scan_end = (kprobe_opcode_t *)mod->core_layout.base_rx;
- preempt_enable();
- }
- #endif
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index f5077ea..46b4664 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -639,6 +639,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -652,6 +653,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ int __init arch_init_kprobes(void)
+ {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
-index 9cc8667..5edbcff 100644
+index 01753cd..b65d17a 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
@@ -5347,7 +5353,7 @@ index 9cc8667..5edbcff 100644
/* Disable interrupts first */
local_irq_disable();
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
-index d34fd72..8b6faee 100644
+index c2efddf..c58e0a2 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -95,8 +95,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
@@ -5362,10 +5368,10 @@ index d34fd72..8b6faee 100644
/* orig_sp is the saved pt_regs, find the elr */
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
-index 9595d3d..7ee5abb 100644
+index 11e5eae..d8cdfa7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
-@@ -512,7 +512,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+@@ -547,7 +547,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
__show_regs(regs);
}
@@ -5427,7 +5433,7 @@ index 479330b..53717a8 100644
#endif /* __ASM_AVR32_KMAP_TYPES_H */
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index a4b7eda..d057f9e 100644
+index b3977e9..4230c51a 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -5454,7 +5460,7 @@ index a4b7eda..d057f9e 100644
/*
* This routine handles page faults. It determines the address and the
* problem, and then passes it off to one of the appropriate routines.
-@@ -178,6 +195,16 @@ bad_area:
+@@ -178,6 +195,16 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
@@ -5648,21 +5654,21 @@ index 18ca6a9..77b0e0d 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
-index c100d78..07538cc 100644
+index c100d78..c44d46d 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -98,5 +98,6 @@ endef
archprepare: make_nr_irqs_h
PHONY += make_nr_irqs_h
-+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
++GCC_PLUGINS_make_nr_irqs_h := n
make_nr_irqs_h:
$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
-index f565ad3..484af46 100644
+index 65d4bb2..8b2e661 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
-@@ -307,4 +307,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+@@ -323,4 +323,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))
@@ -5859,6 +5865,39 @@ index bfe1319..da0014b 100644
memset(to, 0, n);
return n;
}
+diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
+index c7c5144..7e31461 100644
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -507,6 +508,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+ }
++#endif
+
+ /* Check the instruction in the slot is break */
+ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+@@ -1119,6 +1121,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr ==
+@@ -1127,3 +1130,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 6ab0ae7..88f1b60 100644
--- a/arch/ia64/kernel/module.c
@@ -5962,10 +6001,10 @@ index 41e33f8..65180b2a 100644
}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
-index dc506b0..39baade 100644
+index f89d20c..410a1b1 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
-@@ -171,7 +171,7 @@ SECTIONS {
+@@ -172,7 +172,7 @@ SECTIONS {
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
@@ -6002,7 +6041,7 @@ index fa6ad95..b46bd89 100644
# define VM_READ_BIT 0
# define VM_WRITE_BIT 1
# define VM_EXEC_BIT 2
-@@ -151,8 +168,21 @@ retry:
+@@ -151,8 +168,21 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
@@ -6191,10 +6230,10 @@ index 5c3f688..f8cc1b3 100644
# platform specific definitions
include arch/mips/Kbuild.platforms
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index 212ff92..36b3437 100644
+index b3c5bde..d6b5104 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -50,6 +50,7 @@ config MIPS
+@@ -49,6 +49,7 @@ config MIPS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select VIRT_TO_BUS
@@ -6202,7 +6241,7 @@ index 212ff92..36b3437 100644
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
-@@ -2561,7 +2562,7 @@ config RELOCATION_TABLE_SIZE
+@@ -2595,7 +2596,7 @@ config RELOCATION_TABLE_SIZE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
@@ -6211,7 +6250,7 @@ index 212ff92..36b3437 100644
---help---
Randomizes the physical and virtual address at which the
kernel image is loaded, as a security feature that
-@@ -2777,6 +2778,7 @@ source "kernel/Kconfig.preempt"
+@@ -2811,6 +2812,7 @@ source "kernel/Kconfig.preempt"
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
@@ -6949,7 +6988,7 @@ index 9e8ef59..1139d6b 100644
/*
* interrupt-retrigger: NOP for now. This may not be appropriate for all
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
-index 15e0fec..3ee3eec 100644
+index 6bf10e7..3c0b52f 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,7 +11,6 @@
@@ -7065,7 +7104,7 @@ index 5f98759..a3a7cb2 100644
typedef struct { unsigned long long pte; } pte_t;
#define pte_val(x) ((x).pte)
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
-index 93c079a..1d6bf7c 100644
+index a03e869..e1928f5 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -7081,7 +7120,7 @@ index 93c079a..1d6bf7c 100644
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 70128d3..471bc25 100644
+index 9e9e94415..43354f5 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
@@ -7137,10 +7176,10 @@ index e309d8f..20eefec 100644
/*
* We stash processor id into a COP0 register to retrieve it fast
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
-index 21a2aab..c00b80d 100644
+index 89fa5c0b..9409cea 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
-@@ -147,6 +147,7 @@ static inline bool eva_kernel_access(void)
+@@ -148,6 +148,7 @@ static inline bool eva_kernel_access(void)
__ok == 0; \
})
@@ -7149,10 +7188,10 @@ index 21a2aab..c00b80d 100644
likely(__access_ok((addr), (size), __access_mask))
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
-index 58ad63d..051b4b7 100644
+index 9c7f3e1..ed42bda 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
-@@ -36,6 +36,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+@@ -37,6 +37,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
@@ -7164,13 +7203,13 @@ index 58ad63d..051b4b7 100644
+#endif
+
#include <asm/processor.h>
- #include <linux/module.h>
#include <linux/elfcore.h>
+ #include <linux/compat.h>
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
-index 49fb881..b9ab7c2 100644
+index 1ab3432..0e66879 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
-@@ -40,6 +40,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+@@ -41,6 +41,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
@@ -7183,7 +7222,7 @@ index 49fb881..b9ab7c2 100644
+
#include <asm/processor.h>
- #include <linux/module.h>
+ #include <linux/elfcore.h>
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 44a1f79..2bd6aa3 100644
--- a/arch/mips/kernel/irq-gt641xx.c
@@ -7239,11 +7278,47 @@ index f25f7ea..19e1c62 100644
}
}
#else
+diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
+index f5c8bce..b06a560 100644
+--- a/arch/mips/kernel/kprobes.c
++++ b/arch/mips/kernel/kprobes.c
+@@ -535,6 +535,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -543,6 +544,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
+@@ -611,6 +613,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -618,6 +621,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
-index 5b31a94..15ac4a1 100644
+index 7cf653e..7df52f6 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
-@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
nc_core_ready_count = nc_addr;
/* Ensure ready_count is zero-initialised before the assembly runs */
@@ -7253,10 +7328,10 @@ index 5b31a94..15ac4a1 100644
/* Run the generated entry code */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
-index d2d0615..46c1803 100644
+index 9514e5f..a3fc550 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
-@@ -545,18 +545,6 @@ out:
+@@ -545,18 +545,6 @@ unsigned long get_wchan(struct task_struct *task)
return pc;
}
@@ -7276,7 +7351,7 @@ index d2d0615..46c1803 100644
{
struct pt_regs *regs;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
-index 6103b24..8253315 100644
+index a92994d..e389b11 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -882,6 +882,10 @@ long arch_ptrace(struct task_struct *child, long request,
@@ -7372,10 +7447,10 @@ index 4472a7f..c5905e6 100644
}
/* Arrange for an interrupt in a short while */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index 3de85be..73560ec 100644
+index 3905003..7c0cc88 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -695,7 +695,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+@@ -702,7 +702,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
};
prev_state = exception_enter();
@@ -7396,7 +7471,7 @@ index 3de85be..73560ec 100644
force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state);
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
-index 927dc94..27269ee 100644
+index c3e2205..b4302f8 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -2,7 +2,11 @@
@@ -7442,7 +7517,7 @@ index 927dc94..27269ee 100644
EXPORT_SYMBOL(__ashldi3);
+#endif
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
-index 9fdf1a5..6741f0e 100644
+index 1745602..d20aabf 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,11 @@
@@ -7524,10 +7599,10 @@ index 05909d58..b03284b 100644
#endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index 9560ad7..da27540 100644
+index 3bef306..fcec133 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
-@@ -31,6 +31,23 @@
+@@ -30,6 +30,23 @@
int show_unhandled_signals = 1;
@@ -7551,7 +7626,7 @@ index 9560ad7..da27540 100644
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
-@@ -205,6 +222,14 @@ bad_area:
+@@ -204,6 +221,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
@@ -7567,7 +7642,7 @@ index 9560ad7..da27540 100644
tsk->thread.error_code = write;
if (show_unhandled_signals &&
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
-index 72f7478..06abd2a 100644
+index e86ebcf..7a78a07 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -474,10 +474,10 @@ void __init mem_init(void)
@@ -7584,7 +7659,7 @@ index 72f7478..06abd2a 100644
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
-index 3530376..754dde3 100644
+index d08ea3f..66bb13d 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
@@ -7684,7 +7759,7 @@ diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 160b880..3b53fdc 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
-@@ -270,7 +270,7 @@ spurious_8259A_irq:
+@@ -270,7 +270,7 @@ void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
"spurious RM200 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
@@ -7770,10 +7845,10 @@ index bcb5df2..84fabd2 100644
#define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
-index 4ce7a01..449202a 100644
+index 5f55da9..7ce9437 100644
--- a/arch/openrisc/include/asm/cache.h
+++ b/arch/openrisc/include/asm/cache.h
-@@ -19,11 +19,13 @@
+@@ -19,13 +19,15 @@
#ifndef __ASM_OPENRISC_CACHE_H
#define __ASM_OPENRISC_CACHE_H
@@ -7783,6 +7858,8 @@ index 4ce7a01..449202a 100644
* they shouldn't be hard-coded!
*/
+ #define __ro_after_init __read_mostly
+
-#define L1_CACHE_BYTES 16
#define L1_CACHE_SHIFT 4
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
@@ -7895,30 +7972,43 @@ index 3a4ed9f..29b7218 100644
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
-index 4828478..89b1fbe 100644
+index 9a2aee1..a8e588f 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
-@@ -221,17 +221,17 @@ static inline unsigned long __must_check copy_from_user(void *to,
- const void __user *from,
- unsigned long n)
- {
-- int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
- unsigned long ret = n;
-
-- if (likely(sz == -1 || sz >= n))
-+ if (likely(sz == (size_t)-1 || sz >= n))
- ret = __copy_from_user(to, from, n);
- else if (!__builtin_constant_p(n))
- copy_user_overflow(sz, n);
+@@ -223,10 +223,10 @@ static inline void copy_user_overflow(int size, unsigned long count)
+ static __always_inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+ unsigned long ret = n;
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(to, n, false);
+ ret = __copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+@@ -234,7 +234,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
else
- __bad_copy_user();
+ __bad_copy_user();
- if (unlikely(ret))
+ if (unlikely(ret && (long)ret > 0))
memset(to + (n - ret), 0, ret);
- return ret;
- }
+
+ return ret;
+@@ -243,9 +243,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ static __always_inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(from);
++	size_t sz = __compiletime_object_size(from);
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = __copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index a0ecdb4a..71d2069 100644
--- a/arch/parisc/kernel/module.c
@@ -8069,7 +8159,7 @@ index 0a393a0..5b3199e0 100644
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 97d6b20..2ab0232 100644
+index 378df92..9b2ab51 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -719,9 +719,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
@@ -8084,12 +8174,12 @@ index 97d6b20..2ab0232 100644
fault_space = regs->iasq[0];
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index 163af2c..ed77b14 100644
+index 1a0b4f6..f9d326d 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
- #include <linux/module.h>
+ #include <linux/extable.h>
#include <linux/uaccess.h>
+#include <linux/unistd.h>
@@ -8221,7 +8311,7 @@ index 163af2c..ed77b14 100644
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fix;
-@@ -230,8 +341,33 @@ retry:
+@@ -281,8 +392,33 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
good_area:
@@ -8257,10 +8347,10 @@ index 163af2c..ed77b14 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 792cb17..1a96a22 100644
+index 65fba4c..3cfec12 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -146,6 +146,7 @@ config PPC
+@@ -140,6 +140,7 @@ config PPC
select ARCH_USE_BUILTIN_BSWAP
select OLD_SIGSUSPEND
select OLD_SIGACTION if PPC32
@@ -8268,7 +8358,7 @@ index 792cb17..1a96a22 100644
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
-@@ -446,6 +447,7 @@ config KEXEC
+@@ -441,6 +442,7 @@ config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
select KEXEC_CORE
@@ -8277,7 +8367,7 @@ index 792cb17..1a96a22 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
-index f08d567..94e5497 100644
+index 2b90335..5e1a3d6 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -12,6 +12,11 @@
@@ -8522,9 +8612,9 @@ index f08d567..94e5497 100644
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %0,0,%1 # __atomic_add_unless\n\
cmpw 0,%0,%3 \n\
-- beq- 2f \n\
+- beq 2f \n\
- add %0,%2,%0 \n"
-+ beq- 5f \n"
++ beq 5f \n"
+
+ __OVERFLOW_PRE
+ __REFCOUNT_OP(add) " %0,%2,%0 \n"
@@ -8746,9 +8836,9 @@ index f08d567..94e5497 100644
-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
+"1: ldarx %0,0,%1 # atomic64_add_unless\n\
cmpd 0,%0,%3 \n\
-- beq- 2f \n\
+- beq 2f \n\
- add %0,%2,%0 \n"
-+ beq- 5f \n"
++ beq 5f \n"
+
+ __OVERFLOW_PRE
+ __REFCOUNT_OP(add) " %0,%2,%0 \n"
@@ -8779,11 +8869,11 @@ index 880db13..bb4ed4a 100644
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
-index 38b33dc..945d1f1 100644
+index 6b8b2d5..cf17a29 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
-@@ -226,7 +226,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+@@ -227,7 +227,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
{
unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -9144,7 +9234,7 @@ index 4ba26dd..2d1137d 100644
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
-index 52cbf04..c41eb7e 100644
+index 9e1499f..4e03a24 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -270,6 +270,7 @@
@@ -9428,22 +9518,19 @@ index c266227..f3dc6bb 100644
static inline unsigned long clear_user(void __user *addr, unsigned long size)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index fe4c075..fcb4600 100644
+index 1925341..a1841ac 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
-@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
- CFLAGS_btext.o += -fPIC
+@@ -15,7 +15,7 @@ CFLAGS_btext.o += -fPIC
endif
-+CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+-CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-+CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-+CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-+
- ifdef CONFIG_FUNCTION_TRACER
- # Do not trace early boot code
- CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+@@ -31,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
endif
@@ -9475,10 +9562,10 @@ index 38a1f96..ed94e42 100644
ld r4,_DAR(r1)
bl bad_page_fault
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
-index bffec73..9cc5a35 100644
+index 1ba82ea..f78bd700 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
-@@ -1520,10 +1520,10 @@ handle_page_fault:
+@@ -1445,10 +1445,10 @@ handle_page_fault:
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -9491,10 +9578,10 @@ index bffec73..9cc5a35 100644
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
-index 08887cf..0c98725 100644
+index 3c05c31..a8e6888 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
-@@ -477,6 +477,8 @@ void migrate_irqs(void)
+@@ -482,6 +482,8 @@ void migrate_irqs(void)
}
#endif
@@ -9503,7 +9590,7 @@ index 08887cf..0c98725 100644
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
-@@ -489,6 +491,7 @@ static inline void check_stack_overflow(void)
+@@ -494,6 +496,7 @@ static inline void check_stack_overflow(void)
pr_err("do_IRQ: stack overflow: %ld\n",
sp - sizeof(struct thread_info));
dump_stack();
@@ -9511,6 +9598,39 @@ index 08887cf..0c98725 100644
}
#endif
}
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index e785cc9..514488c 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -131,6 +131,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ kcb->kprobe_saved_msr = regs->msr;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -139,6 +140,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->link = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+@@ -547,6 +549,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+@@ -554,3 +557,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 5a7a78f..c0e4207 100644
--- a/arch/powerpc/kernel/module_32.c
@@ -9554,10 +9674,10 @@ index 5a7a78f..c0e4207 100644
sechdrs, module);
if (!module->arch.tramp)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index ad37aa1..51da6c4 100644
+index 49a680d..2514bbcb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
-@@ -1360,8 +1360,8 @@ void show_regs(struct pt_regs * regs)
+@@ -1375,8 +1375,8 @@ void show_regs(struct pt_regs * regs)
* Lookup NIP late so we have the best change of getting the
* above info out without failing
*/
@@ -9568,7 +9688,7 @@ index ad37aa1..51da6c4 100644
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
-@@ -1882,10 +1882,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1897,10 +1897,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
@@ -9576,12 +9696,12 @@ index ad37aa1..51da6c4 100644
+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((ip == rth) && curr_frame >= 0) {
-- printk(" (%pS)",
-+ printk(" (%pA)",
+- pr_cont(" (%pS)",
++ pr_cont(" (%pA)",
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
-@@ -1905,7 +1905,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1920,7 +1920,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
@@ -9590,7 +9710,7 @@ index ad37aa1..51da6c4 100644
regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
-@@ -1942,13 +1942,6 @@ void notrace __ppc64_runlatch_off(void)
+@@ -1957,13 +1957,6 @@ void notrace __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
@@ -9605,10 +9725,19 @@ index ad37aa1..51da6c4 100644
{
unsigned long rnd = 0;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
-index bf91658..edd21f8 100644
+index 5c8f12f..98047fb 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
-@@ -3312,6 +3312,10 @@ static int do_seccomp(struct pt_regs *regs)
+@@ -3151,7 +3151,7 @@ static int do_seccomp(struct pt_regs *regs)
+ * have already loaded -ENOSYS into r3, or seccomp has put
+ * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+ */
+- if (__secure_computing(NULL))
++ if (secure_computing(NULL))
+ return -1;
+
+ /*
+@@ -3169,6 +3169,10 @@ static int do_seccomp(struct pt_regs *regs)
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
@@ -9619,7 +9748,7 @@ index bf91658..edd21f8 100644
/**
* do_syscall_trace_enter() - Do syscall tracing on kernel entry.
* @regs: the pt_regs of the task to trace (current)
-@@ -3335,6 +3339,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+@@ -3192,6 +3196,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
user_exit();
@@ -9631,7 +9760,7 @@ index bf91658..edd21f8 100644
/*
* The tracer may decide to abort the syscall, if so tracehook
* will return !0. Note that the tracer may also just change
-@@ -3353,6 +3362,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+@@ -3210,6 +3219,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
if (regs->gpr[0] >= NR_syscalls)
goto skip;
@@ -9639,7 +9768,7 @@ index bf91658..edd21f8 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
-@@ -3384,6 +3394,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+@@ -3241,6 +3251,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
@@ -9652,33 +9781,33 @@ index bf91658..edd21f8 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index a7daf74..d8159e5 100644
+index 27aa913..dc0d9f5 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
-@@ -1000,7 +1000,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+@@ -1006,7 +1006,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
/* Save user registers on the stack */
frame = &rt_sf->uc.uc_mcontext;
addr = frame;
-- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+- if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
sigret = 0;
- tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+ tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
} else {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
-index 70409bb..6cc6990 100644
+index 96698fd..fe57485 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
-@@ -770,7 +770,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
- current->thread.fp_state.fpscr = 0;
+@@ -791,7 +791,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
-- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
-+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
- regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+- if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
+ regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 62859eb..035955d 100644
+index 023a462..9d940854 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -37,6 +37,7 @@
@@ -9689,16 +9818,16 @@ index 62859eb..035955d 100644
#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
-@@ -145,6 +146,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
- return flags;
+@@ -146,6 +147,8 @@ static unsigned long oops_begin(struct pt_regs *regs)
}
+ NOKPROBE_SYMBOL(oops_begin);
+extern void gr_handle_kernel_exploit(void);
+
- static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+ static void oops_end(unsigned long flags, struct pt_regs *regs,
int signr)
{
-@@ -194,6 +197,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+@@ -195,6 +198,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
@@ -9707,8 +9836,8 @@ index 62859eb..035955d 100644
+
do_exit(signr);
}
-
-@@ -1145,6 +1151,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+ NOKPROBE_SYMBOL(oops_end);
+@@ -1162,6 +1168,26 @@ void program_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
@@ -9801,7 +9930,7 @@ index 5eea6f3..5d10396 100644
EXPORT_SYMBOL(copy_in_user);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index bb1ffc5..9ae5cb6 100644
+index d0b137d..af92bde 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -34,6 +34,10 @@
@@ -9849,7 +9978,7 @@ index bb1ffc5..9ae5cb6 100644
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
-@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+@@ -227,7 +258,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
@@ -9858,7 +9987,7 @@ index bb1ffc5..9ae5cb6 100644
else
is_write = error_code & DSISR_ISSTORE;
#else
-@@ -384,12 +415,16 @@ good_area:
+@@ -384,12 +415,16 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
@@ -9876,7 +10005,7 @@ index bb1ffc5..9ae5cb6 100644
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
-@@ -484,6 +519,23 @@ bad_area:
+@@ -484,6 +519,23 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
@@ -10123,6 +10252,37 @@ index 52d7c87..577d292 100644
if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
else
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index fdb4042..b72ae72 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -269,6 +269,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
+ }
+ NOKPROBE_SYMBOL(pop_kprobe);
+
++#ifdef CONFIG_KRETPROBES
+ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ {
+ ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+@@ -277,6 +278,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
++#endif
+
+ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
+ {
+@@ -740,8 +742,10 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_trampoline_kprobe);
++#endif
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index fbc0789..e7962a1 100644
--- a/arch/s390/kernel/module.c
@@ -10323,6 +10483,42 @@ index ef9e555..331bd29 100644
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index 83acbf3..fa67491 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -72,6 +72,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (*p->addr == BREAKPOINT_INSTRUCTION)
+@@ -79,6 +80,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ /**
+ * If an illegal slot instruction exception occurs for an address
+@@ -203,6 +205,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+ }
+
+ /* Called with kretprobe_lock held */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -211,6 +214,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->pr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 6777177..d44b592 100644
--- a/arch/sh/mm/mmap.c
@@ -10417,10 +10613,10 @@ index 6777177..d44b592 100644
addr = vm_unmapped_area(&info);
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
-index 59b0960..75a8bcb 100644
+index 165ecdd..2bac5bf 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
-@@ -39,6 +39,7 @@ config SPARC
+@@ -38,6 +38,7 @@ config SPARC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select MODULES_USE_ELF_RELA
@@ -10660,7 +10856,7 @@ index a24e41f..47677ff 100644
instruction set this cpu supports. This can NOT be done in userspace
on Sparc. */
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
-index 9331083..59c0499 100644
+index 3f2d403..4385ed9 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -190,6 +190,13 @@ typedef struct {
@@ -10793,18 +10989,10 @@ index 29d64b1..4272fe8 100644
extern int sysctl_tsb_ratio;
#endif
diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
-index 87990b7..352fff0 100644
+index 07c9f2e..352fff0 100644
--- a/arch/sparc/include/asm/spinlock_64.h
+++ b/arch/sparc/include/asm/spinlock_64.h
-@@ -96,14 +96,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
-
- /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
--static void inline arch_read_lock(arch_rwlock_t *lock)
-+static inline void arch_read_lock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
+@@ -103,7 +103,12 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,pn %0, 2f\n"
@@ -10818,7 +11006,7 @@ index 87990b7..352fff0 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
-@@ -116,10 +121,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+@@ -116,7 +121,7 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
@@ -10826,12 +11014,8 @@ index 87990b7..352fff0 100644
+ : "memory", "cc");
}
--static int inline arch_read_trylock(arch_rwlock_t *lock)
-+static inline int arch_read_trylock(arch_rwlock_t *lock)
- {
- int tmp1, tmp2;
-
-@@ -127,7 +132,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ static inline int arch_read_trylock(arch_rwlock_t *lock)
+@@ -127,7 +132,12 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
"1: ldsw [%2], %0\n"
" brlz,a,pn %0, 2f\n"
" mov 0, %0\n"
@@ -10845,14 +11029,7 @@ index 87990b7..352fff0 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
-@@ -140,13 +150,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- return tmp1;
- }
-
--static void inline arch_read_unlock(arch_rwlock_t *lock)
-+static inline void arch_read_unlock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
+@@ -146,7 +156,12 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
__asm__ __volatile__(
"1: lduw [%2], %0\n"
@@ -10866,33 +11043,6 @@ index 87990b7..352fff0 100644
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%xcc, 1b\n"
-@@ -156,7 +171,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_lock(arch_rwlock_t *lock)
-+static inline void arch_write_lock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2;
-
-@@ -181,7 +196,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_unlock(arch_rwlock_t *lock)
-+static inline void arch_write_unlock(arch_rwlock_t *lock)
- {
- __asm__ __volatile__(
- " stw %%g0, [%0]"
-@@ -190,7 +205,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static int inline arch_write_trylock(arch_rwlock_t *lock)
-+static inline int arch_write_trylock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2, result;
-
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 229475f..2fca9163 100644
--- a/arch/sparc/include/asm/thread_info_32.h
@@ -11014,7 +11164,7 @@ index ea55f86..dbf15cf 100644
}
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index a6847fc..a5ac14a 100644
+index 5373136..c528f7e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -10,6 +10,7 @@
@@ -11025,7 +11175,7 @@ index a6847fc..a5ac14a 100644
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
-@@ -76,6 +77,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+@@ -77,6 +78,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
return 1;
}
@@ -11037,7 +11187,7 @@ index a6847fc..a5ac14a 100644
static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
return 1;
-@@ -207,6 +213,9 @@ unsigned long __must_check ___copy_from_user(void *to,
+@@ -191,6 +197,9 @@ unsigned long __must_check ___copy_from_user(void *to,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
@@ -11047,7 +11197,7 @@ index a6847fc..a5ac14a 100644
check_object_size(to, size, false);
return ___copy_from_user(to, from, size);
-@@ -219,6 +228,9 @@ unsigned long __must_check ___copy_to_user(void __user *to,
+@@ -203,6 +212,9 @@ unsigned long __must_check ___copy_to_user(void __user *to,
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
@@ -11058,7 +11208,7 @@ index a6847fc..a5ac14a 100644
return ___copy_to_user(to, from, size);
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
-index fdb1332..1b10f89 100644
+index fa3c02d..c9a6309 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -4,7 +4,7 @@
@@ -11070,6 +11220,26 @@ index fdb1332..1b10f89 100644
extra-y := head_$(BITS).o
+diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
+index b0377db..1da3b53 100644
+--- a/arch/sparc/kernel/kprobes.c
++++ b/arch/sparc/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ * value kept in ri->ret_addr so we don't need to keep adjusting it
+ * back and forth.
+ */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -508,6 +509,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ regs->u_regs[UREG_RETPC] =
+ ((unsigned long)kretprobe_trampoline) - 8;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b7780a5..28315f0 100644
--- a/arch/sparc/kernel/process_32.c
@@ -11101,7 +11271,7 @@ index b7780a5..28315f0 100644
} while (++count < 16);
printk("\n");
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
-index fa14402..b2a7408 100644
+index 47ff558..2333c8a 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
@@ -11131,7 +11301,7 @@ index fa14402..b2a7408 100644
show_regwindow(regs);
show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}
-@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
+@@ -278,7 +278,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
((tp && tp->task) ? tp->task->pid : -1));
if (gp->tstate & TSTATE_PRIV) {
@@ -11154,10 +11324,10 @@ index 79cc0d1..46d6233 100644
.getproplen = prom_getproplen,
.getproperty = prom_getproperty,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
-index 9ddc492..27a5619 100644
+index ac082dd..7170942 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
-@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
+@@ -1068,6 +1068,10 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
@@ -11168,7 +11338,7 @@ index 9ddc492..27a5619 100644
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
int ret = 0;
-@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+@@ -1078,6 +1082,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (test_thread_flag(TIF_NOHZ))
user_exit();
@@ -11180,7 +11350,7 @@ index 9ddc492..27a5619 100644
if (test_thread_flag(TIF_SYSCALL_TRACE))
ret = tracehook_report_syscall_entry(regs);
-@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+@@ -1096,6 +1105,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
if (test_thread_flag(TIF_NOHZ))
user_exit();
@@ -11193,10 +11363,10 @@ index 9ddc492..27a5619 100644
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index d3035ba..40683bd 100644
+index 8182f7c..a5ab37f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
-@@ -891,7 +891,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
return;
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -11205,7 +11375,7 @@ index d3035ba..40683bd 100644
#endif
this_cpu = get_cpu();
-@@ -915,7 +915,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -11214,7 +11384,7 @@ index d3035ba..40683bd 100644
#endif
}
}
-@@ -934,7 +934,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -11223,7 +11393,7 @@ index d3035ba..40683bd 100644
#endif
data0 = 0;
pg_addr = page_address(page);
-@@ -951,7 +951,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
xcall_deliver(data0, __pa(pg_addr),
(u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
@@ -11504,7 +11674,7 @@ index 4f21df7..0a374da 100644
}
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index d21cd62..4e2ca86 100644
+index 4094a51..4a360da 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
@@ -11605,7 +11775,7 @@ index d21cd62..4e2ca86 100644
wmb();
printk("%s: Queue overflowed %d times.\n",
pfx, cnt);
-@@ -2048,7 +2059,7 @@ out:
+@@ -2048,7 +2059,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
*/
void sun4v_resum_overflow(struct pt_regs *regs)
{
@@ -11694,7 +11864,7 @@ index d21cd62..4e2ca86 100644
}
EXPORT_SYMBOL(die_if_kernel);
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
-index 9aacb91..6415c82 100644
+index 52c00d9..6f8aa4e 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
@@ -11707,7 +11877,7 @@ index 9aacb91..6415c82 100644
}
}
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
-index 4f2384a..4e88949 100644
+index 69912d2..6c0c227 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -2,7 +2,7 @@
@@ -11720,10 +11890,10 @@ index 4f2384a..4e88949 100644
lib-$(CONFIG_SPARC32) += ashrdi3.o
lib-$(CONFIG_SPARC32) += memcpy.o memset.o
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index a5c5a02..47db32c 100644
+index 1c6a1bd..93e9698 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
-@@ -16,11 +16,22 @@
+@@ -17,11 +17,22 @@
* barriers.
*/
@@ -11749,12 +11919,14 @@ index a5c5a02..47db32c 100644
cas [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
-@@ -28,13 +39,17 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -29,14 +40,18 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-+ENDPROC(atomic_##op##suffix);
+-EXPORT_SYMBOL(atomic_##op);
++ENDPROC(atomic_##op##suffix); \
++EXPORT_SYMBOL(atomic_##op##suffix);
-#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -11771,19 +11943,21 @@ index a5c5a02..47db32c 100644
cas [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
-@@ -42,7 +57,10 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -44,8 +59,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
--ENDPROC(atomic_##op##_return);
-+ENDPROC(atomic_##op##_return##suffix);
+-ENDPROC(atomic_##op##_return); \
+-EXPORT_SYMBOL(atomic_##op##_return);
++ENDPROC(atomic_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic_##op##_return##suffix)
+
+#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
+ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
#define ATOMIC_FETCH_OP(op) \
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
-@@ -73,13 +91,16 @@ ATOMIC_OPS(xor)
+@@ -77,13 +95,16 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -11803,12 +11977,14 @@ index a5c5a02..47db32c 100644
casx [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -87,13 +108,17 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -91,14 +112,18 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-+ENDPROC(atomic64_##op##suffix);
+-EXPORT_SYMBOL(atomic64_##op);
++ENDPROC(atomic64_##op##suffix); \
++EXPORT_SYMBOL(atomic64_##op##suffix);
-#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
@@ -11825,19 +12001,21 @@ index a5c5a02..47db32c 100644
casx [%o1], %g1, %g7; \
cmp %g1, %g7; \
bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
-@@ -101,7 +126,10 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -106,8 +131,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
--ENDPROC(atomic64_##op##_return);
-+ENDPROC(atomic64_##op##_return##suffix);
+-ENDPROC(atomic64_##op##_return); \
+-EXPORT_SYMBOL(atomic64_##op##_return);
++ENDPROC(atomic64_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic64_##op##_return##suffix);
+
+#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
+ __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
#define ATOMIC64_FETCH_OP(op) \
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
-@@ -132,7 +160,12 @@ ATOMIC64_OPS(xor)
+@@ -139,7 +167,12 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
@@ -11850,30 +12028,6 @@ index a5c5a02..47db32c 100644
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index de5e978..cf48854 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
- /* Atomic counter implementation. */
- #define ATOMIC_OP(op) \
- EXPORT_SYMBOL(atomic_##op); \
--EXPORT_SYMBOL(atomic64_##op);
-+EXPORT_SYMBOL(atomic_##op##_unchecked); \
-+EXPORT_SYMBOL(atomic64_##op); \
-+EXPORT_SYMBOL(atomic64_##op##_unchecked);
-
- #define ATOMIC_OP_RETURN(op) \
- EXPORT_SYMBOL(atomic_##op##_return); \
-@@ -114,6 +116,8 @@ EXPORT_SYMBOL(atomic64_fetch_##op);
- #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
-
- ATOMIC_OPS(add)
-+EXPORT_SYMBOL(atomic_add_return_unchecked);
-+EXPORT_SYMBOL(atomic64_add_return_unchecked);
- ATOMIC_OPS(sub)
-
- #undef ATOMIC_OPS
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 30c3ecc..736f015 100644
--- a/arch/sparc/mm/Makefile
@@ -12179,7 +12333,7 @@ index 4714061..bad7f9a 100644
static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
int text_fault)
{
-@@ -226,6 +500,24 @@ good_area:
+@@ -226,6 +500,24 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
@@ -12205,7 +12359,7 @@ index 4714061..bad7f9a 100644
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 3f291d8..b335338 100644
+index 643c149..845c113 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -23,6 +23,9 @@
@@ -12694,7 +12848,7 @@ index 3f291d8..b335338 100644
asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
-@@ -350,6 +813,29 @@ retry:
+@@ -350,6 +813,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
if (!vma)
goto bad_area;
@@ -12837,7 +12991,7 @@ index 988acc8b..f26345c 100644
pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 05c7708..222a546 100644
+index 37aa537..06b756c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
@@ -12875,10 +13029,10 @@ index 05c7708..222a546 100644
#endif /* CONFIG_DEBUG_DCFLUSH */
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index 78da75b..264302d 100644
+index 4583c03..5e074bb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
-@@ -193,6 +193,7 @@ source "kernel/Kconfig.hz"
+@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
select KEXEC_CORE
@@ -12908,7 +13062,7 @@ index 4cefa0c..98d8b83 100644
#endif /* _ASM_TILE_ATOMIC_64_H */
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
-index 6160761..00cac88 100644
+index 4810e48..08b733b 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -15,11 +15,12 @@
@@ -12941,6 +13095,42 @@ index a77369e..7ba6ecd 100644
n = _copy_from_user(to, from, n);
else if (!__builtin_constant_p(n))
copy_user_overflow(sz, n);
+diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
+index c68694b..12bf0cb 100644
+--- a/arch/tile/kernel/kprobes.c
++++ b/arch/tile/kernel/kprobes.c
+@@ -430,6 +430,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -438,6 +439,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->lr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit.
+@@ -507,6 +509,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -514,6 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 77ceaa3..3630dea 100644
--- a/arch/tile/mm/hugetlbpage.c
@@ -12974,6 +13164,19 @@ index 0ca46ede..8d7fd38 100644
#This will adjust *FLAGS accordingly to the platform.
include $(ARCH_DIR)/Makefile-os-$(OS)
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 6208702..00292c8 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -377,7 +377,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ struct tty_driver *driver = line->driver->driver;
+ int err = -EINVAL;
+
+- if (line->port.count) {
++ if (atomic_read(&line->port.count)) {
+ *error_out = "Device is already open";
+ goto out;
+ }
diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
index 19e1bdd..3665b77 100644
--- a/arch/um/include/asm/cache.h
@@ -13077,7 +13280,7 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 2a1f0ce..ca2cc51 100644
+index bada636..1775eac 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -39,14 +39,13 @@ config X86
@@ -13096,10 +13299,10 @@ index 2a1f0ce..ca2cc51 100644
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select BUILDTIME_EXTABLE_SORT
-@@ -93,7 +92,7 @@ config X86
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+@@ -94,7 +93,7 @@ config X86
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_EBPF_JIT if X86_64
+ select HAVE_ARCH_VMAP_STACK if X86_64
- select HAVE_CC_STACKPROTECTOR
+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
select HAVE_CMPXCHG_DOUBLE
@@ -13113,7 +13316,7 @@ index 2a1f0ce..ca2cc51 100644
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI
-@@ -189,11 +189,13 @@ config MMU
+@@ -190,11 +190,13 @@ config MMU
def_bool y
config ARCH_MMAP_RND_BITS_MIN
@@ -13129,7 +13332,7 @@ index 2a1f0ce..ca2cc51 100644
default 16
config ARCH_MMAP_RND_COMPAT_BITS_MIN
-@@ -295,7 +297,7 @@ config X86_64_SMP
+@@ -296,7 +298,7 @@ config X86_64_SMP
config X86_32_LAZY_GS
def_bool y
@@ -13138,7 +13341,7 @@ index 2a1f0ce..ca2cc51 100644
config ARCH_SUPPORTS_UPROBES
def_bool y
-@@ -677,6 +679,7 @@ config SCHED_OMIT_FRAME_POINTER
+@@ -690,6 +692,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig HYPERVISOR_GUEST
bool "Linux guest support"
@@ -13146,7 +13349,7 @@ index 2a1f0ce..ca2cc51 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1078,6 +1081,7 @@ config VM86
+@@ -1090,6 +1093,7 @@ config VM86
config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
@@ -13154,7 +13357,7 @@ index 2a1f0ce..ca2cc51 100644
default y
depends on MODIFY_LDT_SYSCALL
---help---
-@@ -1232,6 +1236,7 @@ choice
+@@ -1244,6 +1248,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -13162,7 +13365,7 @@ index 2a1f0ce..ca2cc51 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1268,6 +1273,7 @@ config NOHIGHMEM
+@@ -1280,6 +1285,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -13170,7 +13373,7 @@ index 2a1f0ce..ca2cc51 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1320,7 +1326,7 @@ config PAGE_OFFSET
+@@ -1332,7 +1338,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -13179,7 +13382,7 @@ index 2a1f0ce..ca2cc51 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1341,7 +1347,6 @@ config X86_PAE
+@@ -1353,7 +1359,6 @@ config X86_PAE
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
@@ -13187,7 +13390,7 @@ index 2a1f0ce..ca2cc51 100644
config ARCH_DMA_ADDR_T_64BIT
def_bool y
-@@ -1472,7 +1477,7 @@ config ARCH_PROC_KCORE_TEXT
+@@ -1484,7 +1489,7 @@ config ARCH_PROC_KCORE_TEXT
config ILLEGAL_POINTER_VALUE
hex
@@ -13196,7 +13399,7 @@ index 2a1f0ce..ca2cc51 100644
default 0xdead000000000000 if X86_64
source "mm/Kconfig"
-@@ -1795,6 +1800,7 @@ source kernel/Kconfig.hz
+@@ -1807,6 +1812,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
select KEXEC_CORE
@@ -13204,7 +13407,7 @@ index 2a1f0ce..ca2cc51 100644
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1922,7 +1928,7 @@ config RELOCATABLE
+@@ -1934,7 +1940,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image (KASLR)"
@@ -13213,7 +13416,7 @@ index 2a1f0ce..ca2cc51 100644
default n
---help---
In support of Kernel Address Space Layout Randomization (KASLR),
-@@ -1966,7 +1972,9 @@ config X86_NEED_RELOCS
+@@ -1978,7 +1984,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -13224,7 +13427,7 @@ index 2a1f0ce..ca2cc51 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -2081,6 +2089,7 @@ config COMPAT_VDSO
+@@ -2093,6 +2101,7 @@ config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
depends on X86_32 || IA32_EMULATION
@@ -13232,7 +13435,7 @@ index 2a1f0ce..ca2cc51 100644
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
-@@ -2121,15 +2130,6 @@ choice
+@@ -2133,15 +2142,6 @@ choice
If unsure, select "Emulate".
@@ -13248,7 +13451,7 @@ index 2a1f0ce..ca2cc51 100644
config LEGACY_VSYSCALL_EMULATE
bool "Emulate"
help
-@@ -2210,6 +2210,22 @@ config MODIFY_LDT_SYSCALL
+@@ -2222,6 +2222,22 @@ config MODIFY_LDT_SYSCALL
Saying 'N' here may make sense for embedded or server kernels.
@@ -13340,7 +13543,7 @@ index 67eec55..1a5c1ab 100644
---help---
This is a debug driver, which gets the power states
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 830ed39..56602a5 100644
+index 2d44933..86ecceb 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y)
@@ -13363,7 +13566,7 @@ index 830ed39..56602a5 100644
ifdef CONFIG_X86_X32
x32_ld_ok := $(call try-run,\
/bin/echo -e '1: .quad 1b' | \
-@@ -191,6 +191,7 @@ archheaders:
+@@ -192,6 +192,7 @@ archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
archprepare:
@@ -13371,7 +13574,7 @@ index 830ed39..56602a5 100644
ifeq ($(CONFIG_KEXEC_FILE),y)
$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
endif
-@@ -277,3 +278,9 @@ define archhelp
+@@ -278,3 +279,9 @@ define archhelp
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
endef
@@ -13381,6 +13584,18 @@ index 830ed39..56602a5 100644
+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
+*** Please upgrade your binutils to 2.18 or newer
+endef
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 12ea8f8..46969be 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -11,6 +11,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Kernel does not boot with kcov instrumentation here.
+ # One of the problems observed was insertion of __sanitizer_cov_trace_pc()
diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
index 0d41d68..2d6120c 100644
--- a/arch/x86/boot/bitops.h
@@ -13417,10 +13632,18 @@ index e5612f3..e755d05 100644
}
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
-index 536ccfc..1295cc1f 100644
+index 34d9e15..c26e047 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
-@@ -35,6 +35,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
+@@ -18,6 +18,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT := n
+@@ -35,6 +36,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
@@ -13486,6 +13709,16 @@ index a53440e..c3dbf1e 100644
ENDPROC(efi_call_phys)
.previous
+diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
+index 99494dff..7fa59bf 100644
+--- a/arch/x86/boot/compressed/efi_stub_64.S
++++ b/arch/x86/boot/compressed/efi_stub_64.S
+@@ -2,4 +2,5 @@
+ #include <asm/msr.h>
+ #include <asm/processor-flags.h>
+
++#define efi_call efi_call_early
+ #include "../../platform/efi/efi_stub_64.S"
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index 630384a..278e788 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
@@ -13502,7 +13735,7 @@ index 630384a..278e788 100644
.quad 0x0000000000000000 /* TS continued */
efi_gdt64_end:
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
-index 1038524..b6acc21 100644
+index fd0b6a2..7206864 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -169,10 +169,10 @@ preferred_addr:
@@ -13519,7 +13752,7 @@ index 1038524..b6acc21 100644
/* Target address to relocate to for decompression */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
-index 0d80a7a..ed3e0ff 100644
+index efdfba2..af6d962 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -103,10 +103,10 @@ ENTRY(startup_32)
@@ -13559,6 +13792,15 @@ index 0d80a7a..ed3e0ff 100644
.quad 0x0080890000000000 /* TS descriptor */
.quad 0x0000000000000000 /* TS continued */
gdt_end:
+@@ -465,7 +465,7 @@ efi32_config:
+ .global efi64_config
+ efi64_config:
+ .fill 4,8,0
+- .quad efi_call
++ .quad efi_call_early
+ .byte 1
+ #endif /* CONFIG_EFI_STUB */
+
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index b3c5a5f0..596115e 100644
--- a/arch/x86/boot/compressed/misc.c
@@ -13758,8 +14000,36 @@ index 77780e3..86be0cb 100644
int key;
unsigned int v;
+diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
+index 2849dbc..d7ff39c 100644
+--- a/arch/x86/crypto/aes-i586-asm_32.S
++++ b/arch/x86/crypto/aes-i586-asm_32.S
+@@ -38,6 +38,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
+
+@@ -286,7 +287,7 @@ ENTRY(aes_enc_blk)
+ pop %ebx
+ mov %r0,(%ebp)
+ pop %ebp
+- ret
++ pax_ret aes_enc_blk
+ ENDPROC(aes_enc_blk)
+
+ // AES (Rijndael) Decryption Subroutine
+@@ -358,5 +359,5 @@ ENTRY(aes_dec_blk)
+ pop %ebx
+ mov %r0,(%ebp)
+ pop %ebp
+- ret
++ pax_ret aes_dec_blk
+ ENDPROC(aes_dec_blk)
diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
-index 9105655..41779c1 100644
+index 9105655..cf81747 100644
--- a/arch/x86/crypto/aes-x86_64-asm_64.S
+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
@@ -8,6 +8,8 @@
@@ -13771,17 +14041,73 @@ index 9105655..41779c1 100644
.extern crypto_ft_tab
.extern crypto_it_tab
.extern crypto_fl_tab
-@@ -70,6 +72,8 @@
- je B192; \
- leaq 32(r9),r9;
+@@ -77,7 +79,7 @@
+ movl r6 ## E,4(r9); \
+ movl r7 ## E,8(r9); \
+ movl r8 ## E,12(r9); \
+- ret; \
++ pax_ret FUNC; \
+ ENDPROC(FUNC);
+
+ #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
+diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+index a916c4a..7e7b7cf 100644
+--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
++++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+@@ -64,6 +64,7 @@
-+#define ret pax_force_retaddr; ret
-+
- #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
- movq r1,r2; \
- movq r3,r4; \
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ #define CONCAT(a,b) a##b
+ #define VMOVDQ vmovdqu
+@@ -436,7 +437,7 @@ ddq_add_8:
+
+ /* main body of aes ctr load */
+
+-.macro do_aes_ctrmain key_len
++.macro do_aes_ctrmain func key_len
+ cmp $16, num_bytes
+ jb .Ldo_return2\key_len
+
+@@ -537,7 +538,7 @@ ddq_add_8:
+ /* return updated IV */
+ vpshufb xbyteswap, xcounter, xcounter
+ vmovdqu xcounter, (p_iv)
+- ret
++ pax_ret \func
+ .endm
+
+ /*
+@@ -549,7 +550,7 @@ ddq_add_8:
+ */
+ ENTRY(aes_ctr_enc_128_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_128
++ do_aes_ctrmain aes_ctr_enc_128_avx_by8 KEY_128
+
+ ENDPROC(aes_ctr_enc_128_avx_by8)
+
+@@ -562,7 +563,7 @@ ENDPROC(aes_ctr_enc_128_avx_by8)
+ */
+ ENTRY(aes_ctr_enc_192_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_192
++ do_aes_ctrmain aes_ctr_enc_192_avx_by8 KEY_192
+
+ ENDPROC(aes_ctr_enc_192_avx_by8)
+
+@@ -575,6 +576,6 @@ ENDPROC(aes_ctr_enc_192_avx_by8)
+ */
+ ENTRY(aes_ctr_enc_256_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_256
++ do_aes_ctrmain aes_ctr_enc_256_avx_by8 KEY_256
+
+ ENDPROC(aes_ctr_enc_256_avx_by8)
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
-index 383a6f8..a4db591 100644
+index 383a6f8..dc7f45d 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
@@ -13944,17 +14270,18 @@ index 383a6f8..a4db591 100644
shl $3, %arg4 # len(C) in bits (*128)
MOVQ_R64_XMM %arg4, %xmm1
pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1452,7 +1453,8 @@ _return_T_done_decrypt:
+@@ -1452,8 +1453,8 @@ _return_T_done_decrypt:
mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
+- ret
+ pop %r15
-+ pax_force_retaddr
- ret
++ pax_ret aesni_gcm_dec
ENDPROC(aesni_gcm_dec)
-@@ -1540,8 +1542,8 @@ ENDPROC(aesni_gcm_dec)
+
+@@ -1540,8 +1541,8 @@ ENDPROC(aesni_gcm_dec)
*
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
@@ -13965,7 +14292,7 @@ index 383a6f8..a4db591 100644
push %r13
push %r14
mov %rsp, %r14
-@@ -1551,8 +1553,8 @@ ENTRY(aesni_gcm_enc)
+@@ -1551,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
#
sub $VARIABLE_OFFSET, %rsp
and $~63, %rsp
@@ -13976,7 +14303,7 @@ index 383a6f8..a4db591 100644
movdqa SHUF_MASK(%rip), %xmm2
PSHUFB_XMM %xmm2, %xmm13
-@@ -1576,13 +1578,13 @@ ENTRY(aesni_gcm_enc)
+@@ -1576,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
movdqa %xmm13, HashKey(%rsp)
mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
and $-16, %r13
@@ -13993,7 +14320,7 @@ index 383a6f8..a4db591 100644
jb _initial_num_blocks_is_1_encrypt
je _initial_num_blocks_is_2_encrypt
_initial_num_blocks_is_3_encrypt:
-@@ -1635,14 +1637,14 @@ _zero_cipher_left_encrypt:
+@@ -1635,14 +1636,14 @@ _zero_cipher_left_encrypt:
sub $16, %r11
add %r13, %r11
movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
@@ -14012,7 +14339,7 @@ index 383a6f8..a4db591 100644
# get the appropriate mask to mask out top 16-r13 bytes of xmm0
pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
movdqa SHUF_MASK(%rip), %xmm10
-@@ -1675,9 +1677,9 @@ _less_than_8_bytes_left_encrypt:
+@@ -1675,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
sub $1, %r13
jne _less_than_8_bytes_left_encrypt
_multiple_of_16_bytes_encrypt:
@@ -14025,54 +14352,163 @@ index 383a6f8..a4db591 100644
shl $3, %arg4 # len(C) in bits (*128)
MOVQ_R64_XMM %arg4, %xmm1
pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1716,7 +1718,8 @@ _return_T_done_encrypt:
+@@ -1716,8 +1717,8 @@ _return_T_done_encrypt:
mov %r14, %rsp
pop %r14
pop %r13
- pop %r12
+- ret
+ pop %r15
-+ pax_force_retaddr
- ret
++ pax_ret aesni_gcm_enc
ENDPROC(aesni_gcm_enc)
-@@ -1734,6 +1737,7 @@ _key_expansion_256a:
+ #endif
+@@ -1734,7 +1735,7 @@ _key_expansion_256a:
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _key_expansion_128
ENDPROC(_key_expansion_128)
ENDPROC(_key_expansion_256a)
-@@ -1760,6 +1764,7 @@ _key_expansion_192a:
+
+@@ -1760,7 +1761,7 @@ _key_expansion_192a:
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _key_expansion_192a
ENDPROC(_key_expansion_192a)
-@@ -1780,6 +1785,7 @@ _key_expansion_192b:
+ .align 4
+@@ -1780,7 +1781,7 @@ _key_expansion_192b:
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _key_expansion_192b
ENDPROC(_key_expansion_192b)
-@@ -1793,6 +1799,7 @@ _key_expansion_256b:
+ .align 4
+@@ -1793,7 +1794,7 @@ _key_expansion_256b:
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _key_expansion_256b
ENDPROC(_key_expansion_256b)
-@@ -1908,13 +1915,14 @@ ENTRY(aesni_set_key)
+ /*
+@@ -1820,72 +1821,72 @@ ENTRY(aesni_set_key)
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ jmp .Ldec_key
+ .Lenc_key192:
+ movq 0x10(UKEYP), %xmm2 # other user key
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ jmp .Ldec_key
+ .Lenc_key128:
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ .Ldec_key:
+ sub $0x10, TKEYP
+ movaps (KEYP), %xmm0
+@@ -1908,13 +1909,13 @@ ENTRY(aesni_set_key)
popl KEYP
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_set_key
ENDPROC(aesni_set_key)
/*
@@ -14083,28 +14519,38 @@ index 383a6f8..a4db591 100644
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
-@@ -1932,6 +1940,7 @@ ENTRY(aesni_enc)
+@@ -1925,14 +1926,14 @@ ENTRY(aesni_enc)
+ #endif
+ movl 480(KEYP), KLEN # key length
+ movups (INP), STATE # input
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE, (OUTP) # output
+ #ifndef __x86_64__
+ popl KLEN
popl KEYP
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_enc
ENDPROC(aesni_enc)
-@@ -1990,6 +1999,7 @@ _aesni_enc1:
+ /*
+@@ -1990,7 +1991,7 @@ _aesni_enc1:
AESENC KEY STATE
movaps 0x70(TKEYP), KEY
AESENCLAST KEY STATE
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_enc1
ENDPROC(_aesni_enc1)
-@@ -2099,13 +2109,14 @@ _aesni_enc4:
+ /*
+@@ -2099,13 +2100,13 @@ _aesni_enc4:
AESENCLAST KEY STATE2
AESENCLAST KEY STATE3
AESENCLAST KEY STATE4
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_enc4
ENDPROC(_aesni_enc4)
/*
@@ -14115,79 +14561,158 @@ index 383a6f8..a4db591 100644
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
-@@ -2124,6 +2135,7 @@ ENTRY(aesni_dec)
+@@ -2117,14 +2118,14 @@ ENTRY(aesni_dec)
+ mov 480(KEYP), KLEN # key length
+ add $240, KEYP
+ movups (INP), STATE # input
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ movups STATE, (OUTP) #output
+ #ifndef __x86_64__
+ popl KLEN
popl KEYP
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_dec
ENDPROC(aesni_dec)
-@@ -2182,6 +2194,7 @@ _aesni_dec1:
+ /*
+@@ -2182,7 +2183,7 @@ _aesni_dec1:
AESDEC KEY STATE
movaps 0x70(TKEYP), KEY
AESDECLAST KEY STATE
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_dec1
ENDPROC(_aesni_dec1)
-@@ -2291,6 +2304,7 @@ _aesni_dec4:
+ /*
+@@ -2291,7 +2292,7 @@ _aesni_dec4:
AESDECLAST KEY STATE2
AESDECLAST KEY STATE3
AESDECLAST KEY STATE4
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_dec4
ENDPROC(_aesni_dec4)
-@@ -2351,6 +2365,7 @@ ENTRY(aesni_ecb_enc)
+ /*
+@@ -2322,7 +2323,7 @@ ENTRY(aesni_ecb_enc)
+ movups 0x10(INP), STATE2
+ movups 0x20(INP), STATE3
+ movups 0x30(INP), STATE4
+- call _aesni_enc4
++ pax_direct_call _aesni_enc4
+ movups STATE1, (OUTP)
+ movups STATE2, 0x10(OUTP)
+ movups STATE3, 0x20(OUTP)
+@@ -2337,7 +2338,7 @@ ENTRY(aesni_ecb_enc)
+ .align 4
+ .Lecb_enc_loop1:
+ movups (INP), STATE1
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE1, (OUTP)
+ sub $16, LEN
+ add $16, INP
+@@ -2351,7 +2352,7 @@ ENTRY(aesni_ecb_enc)
popl LEN
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_ecb_enc
ENDPROC(aesni_ecb_enc)
-@@ -2412,6 +2427,7 @@ ENTRY(aesni_ecb_dec)
+ /*
+@@ -2383,7 +2384,7 @@ ENTRY(aesni_ecb_dec)
+ movups 0x10(INP), STATE2
+ movups 0x20(INP), STATE3
+ movups 0x30(INP), STATE4
+- call _aesni_dec4
++ pax_direct_call _aesni_dec4
+ movups STATE1, (OUTP)
+ movups STATE2, 0x10(OUTP)
+ movups STATE3, 0x20(OUTP)
+@@ -2398,7 +2399,7 @@ ENTRY(aesni_ecb_dec)
+ .align 4
+ .Lecb_dec_loop1:
+ movups (INP), STATE1
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ movups STATE1, (OUTP)
+ sub $16, LEN
+ add $16, INP
+@@ -2412,7 +2413,7 @@ ENTRY(aesni_ecb_dec)
popl LEN
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_ecb_dec
ENDPROC(aesni_ecb_dec)
-@@ -2456,6 +2472,7 @@ ENTRY(aesni_cbc_enc)
+ /*
+@@ -2440,7 +2441,7 @@ ENTRY(aesni_cbc_enc)
+ .Lcbc_enc_loop:
+ movups (INP), IN # load input
+ pxor IN, STATE
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE, (OUTP) # store output
+ sub $16, LEN
+ add $16, INP
+@@ -2456,7 +2457,7 @@ ENTRY(aesni_cbc_enc)
popl IVP
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_cbc_enc
ENDPROC(aesni_cbc_enc)
-@@ -2549,6 +2566,7 @@ ENTRY(aesni_cbc_dec)
+ /*
+@@ -2500,7 +2501,7 @@ ENTRY(aesni_cbc_dec)
+ movups 0x30(INP), IN2
+ movaps IN2, STATE4
+ #endif
+- call _aesni_dec4
++ pax_direct_call _aesni_dec4
+ pxor IV, STATE1
+ #ifdef __x86_64__
+ pxor IN1, STATE2
+@@ -2530,7 +2531,7 @@ ENTRY(aesni_cbc_dec)
+ .Lcbc_dec_loop1:
+ movups (INP), IN
+ movaps IN, STATE
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ pxor IV, STATE
+ movups STATE, (OUTP)
+ movaps IN, IV
+@@ -2549,7 +2550,7 @@ ENTRY(aesni_cbc_dec)
popl IVP
#endif
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_cbc_dec
ENDPROC(aesni_cbc_dec)
-@@ -2578,6 +2596,7 @@ _aesni_inc_init:
+ #ifdef __x86_64__
+@@ -2578,7 +2579,7 @@ _aesni_inc_init:
mov $1, TCTR_LOW
MOVQ_R64_XMM TCTR_LOW INC
MOVQ_R64_XMM CTR TCTR_LOW
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_inc_init
ENDPROC(_aesni_inc_init)
-@@ -2607,6 +2626,7 @@ _aesni_inc:
+ /*
+@@ -2607,37 +2608,37 @@ _aesni_inc:
.Linc_low:
movaps CTR, IV
PSHUFB_XMM BSWAP_MASK IV
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret _aesni_inc
ENDPROC(_aesni_inc)
-@@ -2614,7 +2634,7 @@ ENDPROC(_aesni_inc)
+ /*
* void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
* size_t len, u8 *iv)
*/
@@ -14196,24 +14721,151 @@ index 383a6f8..a4db591 100644
FRAME_BEGIN
cmp $16, LEN
jb .Lctr_enc_just_ret
-@@ -2670,6 +2690,7 @@ ENTRY(aesni_ctr_enc)
+ mov 480(KEYP), KLEN
+ movups (IVP), IV
+- call _aesni_inc_init
++ pax_direct_call _aesni_inc_init
+ cmp $64, LEN
+ jb .Lctr_enc_loop1
+ .align 4
+ .Lctr_enc_loop4:
+ movaps IV, STATE1
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups (INP), IN1
+ movaps IV, STATE2
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x10(INP), IN2
+ movaps IV, STATE3
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x20(INP), IN3
+ movaps IV, STATE4
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x30(INP), IN4
+- call _aesni_enc4
++ pax_direct_call _aesni_enc4
+ pxor IN1, STATE1
+ movups STATE1, (OUTP)
+ pxor IN2, STATE2
+@@ -2656,9 +2657,9 @@ ENTRY(aesni_ctr_enc)
+ .align 4
+ .Lctr_enc_loop1:
+ movaps IV, STATE
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups (INP), IN
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ pxor IN, STATE
+ movups STATE, (OUTP)
+ sub $16, LEN
+@@ -2670,7 +2671,7 @@ ENTRY(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_ctr_enc
ENDPROC(aesni_ctr_enc)
-@@ -2798,6 +2819,7 @@ ENTRY(aesni_xts_crypt8)
+ /*
+@@ -2734,7 +2735,7 @@ ENTRY(aesni_xts_crypt8)
+ pxor INC, STATE4
+ movdqu IV, 0x30(OUTP)
+
+- call *%r11
++ pax_indirect_call "%r11", _aesni_enc4
+
+ movdqu 0x00(OUTP), INC
+ pxor INC, STATE1
+@@ -2779,7 +2780,7 @@ ENTRY(aesni_xts_crypt8)
+ _aesni_gf128mul_x_ble()
+ movups IV, (IVP)
+
+- call *%r11
++ pax_indirect_call "%r11", _aesni_enc4
+
+ movdqu 0x40(OUTP), INC
+ pxor INC, STATE1
+@@ -2798,7 +2799,7 @@ ENTRY(aesni_xts_crypt8)
movdqu STATE4, 0x70(OUTP)
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret aesni_xts_crypt8
ENDPROC(aesni_xts_crypt8)
+ #endif
+diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
+index 522ab68..782ae42 100644
+--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
++++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
+@@ -121,6 +121,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 16
+@@ -1486,7 +1487,7 @@ ENTRY(aesni_gcm_precomp_avx_gen2)
+ pop %r14
+ pop %r13
+ pop %r12
+- ret
++ pax_ret aesni_gcm_precomp_avx_gen2
+ ENDPROC(aesni_gcm_precomp_avx_gen2)
+
+ ###############################################################################
+@@ -1507,7 +1508,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen2)
+ ###############################################################################
+ ENTRY(aesni_gcm_enc_avx_gen2)
+ GCM_ENC_DEC_AVX ENC
+- ret
++ pax_ret aesni_gcm_enc_avx_gen2
+ ENDPROC(aesni_gcm_enc_avx_gen2)
+
+ ###############################################################################
+@@ -1528,7 +1529,7 @@ ENDPROC(aesni_gcm_enc_avx_gen2)
+ ###############################################################################
+ ENTRY(aesni_gcm_dec_avx_gen2)
+ GCM_ENC_DEC_AVX DEC
+- ret
++ pax_ret aesni_gcm_dec_avx_gen2
+ ENDPROC(aesni_gcm_dec_avx_gen2)
+ #endif /* CONFIG_AS_AVX */
+
+@@ -2762,7 +2763,7 @@ ENTRY(aesni_gcm_precomp_avx_gen4)
+ pop %r14
+ pop %r13
+ pop %r12
+- ret
++ pax_ret aesni_gcm_precomp_avx_gen4
+ ENDPROC(aesni_gcm_precomp_avx_gen4)
+
+
+@@ -2784,7 +2785,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen4)
+ ###############################################################################
+ ENTRY(aesni_gcm_enc_avx_gen4)
+ GCM_ENC_DEC_AVX2 ENC
+- ret
++ pax_ret aesni_gcm_enc_avx_gen4
+ ENDPROC(aesni_gcm_enc_avx_gen4)
+
+ ###############################################################################
+@@ -2805,7 +2806,7 @@ ENDPROC(aesni_gcm_enc_avx_gen4)
+ ###############################################################################
+ ENTRY(aesni_gcm_dec_avx_gen4)
+ GCM_ENC_DEC_AVX2 DEC
+- ret
++ pax_ret aesni_gcm_dec_avx_gen4
+ ENDPROC(aesni_gcm_dec_avx_gen4)
+
+ #endif /* CONFIG_AS_AVX2 */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
-index 0ab5ee1..a5d431f 100644
+index aa8b067..f9da224 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -71,9 +71,9 @@ struct aesni_xts_ctx {
@@ -14228,8 +14880,33 @@ index 0ab5ee1..a5d431f 100644
const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
+@@ -83,6 +83,15 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
++int _key_expansion_128(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_192a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_192b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_256a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_256b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++void _aesni_enc1(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_enc4(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_dec1(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_dec4(void *ctx, u8 *out, const u8 *in) __rap_hash;
+
+ int crypto_fpu_init(void);
+ void crypto_fpu_exit(void);
+@@ -96,6 +105,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+ asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
++void _aesni_inc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash;
++void _aesni_inc_init(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash;
+
+ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, bool enc, u8 *iv);
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
-index 246c670..4fb7603 100644
+index 246c670..d4e1aa5 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -21,6 +21,7 @@
@@ -14240,16 +14917,16 @@ index 246c670..4fb7603 100644
.file "blowfish-x86_64-asm.S"
.text
-@@ -149,13 +150,15 @@ ENTRY(__blowfish_enc_blk)
+@@ -149,13 +150,13 @@ ENTRY(__blowfish_enc_blk)
jnz .L__enc_xor;
write_block();
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __blowfish_enc_blk;
.L__enc_xor:
xor_block();
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __blowfish_enc_blk;
ENDPROC(__blowfish_enc_blk)
-ENTRY(blowfish_dec_blk)
@@ -14257,28 +14934,29 @@ index 246c670..4fb7603 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
+@@ -183,7 +184,7 @@ ENTRY(blowfish_dec_blk)
movq %r11, %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret blowfish_dec_blk;
ENDPROC(blowfish_dec_blk)
-@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
+ /**********************************************************************
+@@ -334,17 +335,17 @@ ENTRY(__blowfish_enc_blk_4way)
popq %rbx;
popq %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __blowfish_enc_blk_4way;
.L__enc_xor4:
-@@ -341,10 +346,11 @@ ENTRY(__blowfish_enc_blk_4way)
+ xor_block4();
popq %rbx;
popq %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __blowfish_enc_blk_4way;
ENDPROC(__blowfish_enc_blk_4way)
-ENTRY(blowfish_dec_blk_4way)
@@ -14286,15 +14964,15 @@ index 246c670..4fb7603 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
+@@ -375,5 +376,5 @@ ENTRY(blowfish_dec_blk_4way)
popq %rbx;
popq %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret blowfish_dec_blk_4way;
ENDPROC(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
-index aa9e8bd..0b8def4 100644
+index aa9e8bd..7e68f75 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
@@ -14305,39 +14983,61 @@ index aa9e8bd..0b8def4 100644
#define CAMELLIA_TABLE_BYTE_LEN 272
-@@ -192,6 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+@@ -192,7 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd;
ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
-@@ -200,6 +202,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ .align 8
+@@ -200,7 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab;
ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
-@@ -783,6 +786,7 @@ __camellia_enc_blk16:
+ /*
+@@ -212,7 +213,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
+ leaq (key_table + (i) * 8)(CTX), %r9; \
+- call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
++ pax_direct_call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
+ \
+ vmovdqu x4, 0 * 16(mem_cd); \
+ vmovdqu x5, 1 * 16(mem_cd); \
+@@ -224,7 +225,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ vmovdqu x3, 7 * 16(mem_cd); \
+ \
+ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
+- call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
++ pax_direct_call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
+ \
+ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
+
+@@ -783,7 +784,7 @@ __camellia_enc_blk16:
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_xts_enc_16way;
.align 8
-@@ -870,6 +874,7 @@ __camellia_dec_blk16:
+ .Lenc_max32:
+@@ -870,7 +871,7 @@ __camellia_dec_blk16:
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_xts_dec_16way;
.align 8
-@@ -889,7 +894,7 @@ __camellia_dec_blk16:
+ .Ldec_max32:
+@@ -889,7 +890,7 @@ __camellia_dec_blk16:
jmp .Ldec_max24;
ENDPROC(__camellia_dec_blk16)
@@ -14346,12 +15046,20 @@ index aa9e8bd..0b8def4 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -911,10 +916,11 @@ ENTRY(camellia_ecb_enc_16way)
+@@ -904,17 +905,17 @@ ENTRY(camellia_ecb_enc_16way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_enc_blk16;
++ pax_direct_call __camellia_enc_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ecb_enc_16way;
ENDPROC(camellia_ecb_enc_16way)
-ENTRY(camellia_ecb_dec_16way)
@@ -14359,12 +15067,20 @@ index aa9e8bd..0b8def4 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -941,10 +947,11 @@ ENTRY(camellia_ecb_dec_16way)
+@@ -934,17 +935,17 @@ ENTRY(camellia_ecb_dec_16way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_dec_blk16;
++ pax_direct_call __camellia_dec_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
%xmm8, %rsi);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ecb_dec_16way;
ENDPROC(camellia_ecb_dec_16way)
-ENTRY(camellia_cbc_dec_16way)
@@ -14372,15 +15088,25 @@ index aa9e8bd..0b8def4 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -992,6 +999,7 @@ ENTRY(camellia_cbc_dec_16way)
+@@ -968,7 +969,7 @@ ENTRY(camellia_cbc_dec_16way)
+ subq $(16 * 16), %rsp;
+ movq %rsp, %rax;
+
+- call __camellia_dec_blk16;
++ pax_direct_call __camellia_dec_blk16;
+
+ addq $(16 * 16), %rsp;
+
+@@ -992,7 +993,7 @@ ENTRY(camellia_cbc_dec_16way)
%xmm8, %rsi);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_cbc_dec_16way;
ENDPROC(camellia_cbc_dec_16way)
-@@ -1001,7 +1009,7 @@ ENDPROC(camellia_cbc_dec_16way)
+ #define inc_le128(x, minus_one, tmp) \
+@@ -1001,7 +1002,7 @@ ENDPROC(camellia_cbc_dec_16way)
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
@@ -14389,20 +15115,39 @@ index aa9e8bd..0b8def4 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -1105,6 +1113,7 @@ ENTRY(camellia_ctr_16way)
+@@ -1080,7 +1081,7 @@ ENTRY(camellia_ctr_16way)
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+- call __camellia_enc_blk16;
++ pax_direct_call __camellia_enc_blk16;
+
+ addq $(16 * 16), %rsp;
+
+@@ -1105,7 +1106,7 @@ ENTRY(camellia_ctr_16way)
%xmm8, %rsi);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ctr_16way;
ENDPROC(camellia_ctr_16way)
-@@ -1249,10 +1258,11 @@ camellia_xts_crypt_16way:
+ #define gf128mul_x_ble(iv, mask, tmp) \
+@@ -1224,7 +1225,7 @@ camellia_xts_crypt_16way:
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+- call *%r9;
++ pax_indirect_call "%r9", camellia_xts_enc_16way;
+
+ addq $(16 * 16), %rsp;
+
+@@ -1249,10 +1250,10 @@ camellia_xts_crypt_16way:
%xmm8, %rsi);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_xts_crypt_16way;
ENDPROC(camellia_xts_crypt_16way)
-ENTRY(camellia_xts_enc_16way)
@@ -14410,7 +15155,7 @@ index aa9e8bd..0b8def4 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -1266,7 +1276,7 @@ ENTRY(camellia_xts_enc_16way)
+@@ -1266,7 +1267,7 @@ ENTRY(camellia_xts_enc_16way)
jmp camellia_xts_crypt_16way;
ENDPROC(camellia_xts_enc_16way)
@@ -14420,7 +15165,7 @@ index aa9e8bd..0b8def4 100644
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
-index 16186c1..3468f83 100644
+index 16186c1..a751452 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
@@ -14431,39 +15176,61 @@ index 16186c1..3468f83 100644
#define CAMELLIA_TABLE_BYTE_LEN 272
-@@ -231,6 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+@@ -231,7 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd;
ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
-@@ -239,6 +241,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ .align 8
+@@ -239,7 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab;
ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
-@@ -823,6 +826,7 @@ __camellia_enc_blk32:
+ /*
+@@ -251,7 +252,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
+ leaq (key_table + (i) * 8)(CTX), %r9; \
+- call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
++ pax_direct_call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
+ \
+ vmovdqu x0, 4 * 32(mem_cd); \
+ vmovdqu x1, 5 * 32(mem_cd); \
+@@ -263,7 +264,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ vmovdqu x7, 3 * 32(mem_cd); \
+ \
+ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
+- call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
++ pax_direct_call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
+ \
+ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
+
+@@ -823,7 +824,7 @@ __camellia_enc_blk32:
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_enc_blk32;
.align 8
-@@ -910,6 +914,7 @@ __camellia_dec_blk32:
+ .Lenc_max32:
+@@ -910,7 +911,7 @@ __camellia_dec_blk32:
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_dec_blk32;
.align 8
-@@ -929,7 +934,7 @@ __camellia_dec_blk32:
+ .Ldec_max32:
+@@ -929,7 +930,7 @@ __camellia_dec_blk32:
jmp .Ldec_max24;
ENDPROC(__camellia_dec_blk32)
@@ -14472,12 +15239,21 @@ index 16186c1..3468f83 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
-@@ -955,10 +960,11 @@ ENTRY(camellia_ecb_enc_32way)
+@@ -946,7 +947,7 @@ ENTRY(camellia_ecb_enc_32way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_enc_blk32;
++ pax_direct_call __camellia_enc_blk32;
+
+ write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
+ %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
+@@ -955,10 +956,10 @@ ENTRY(camellia_ecb_enc_32way)
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ecb_enc_32way;
ENDPROC(camellia_ecb_enc_32way)
-ENTRY(camellia_ecb_dec_32way)
@@ -14485,12 +15261,21 @@ index 16186c1..3468f83 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
-@@ -989,10 +995,11 @@ ENTRY(camellia_ecb_dec_32way)
+@@ -980,7 +981,7 @@ ENTRY(camellia_ecb_dec_32way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_dec_blk32;
++ pax_direct_call __camellia_dec_blk32;
+
+ write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
+ %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
+@@ -989,10 +990,10 @@ ENTRY(camellia_ecb_dec_32way)
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ecb_dec_32way;
ENDPROC(camellia_ecb_dec_32way)
-ENTRY(camellia_cbc_dec_32way)
@@ -14498,15 +15283,25 @@ index 16186c1..3468f83 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
-@@ -1057,6 +1064,7 @@ ENTRY(camellia_cbc_dec_32way)
+@@ -1028,7 +1029,7 @@ ENTRY(camellia_cbc_dec_32way)
+ movq %rsp, %rax;
+
+ .Lcbc_dec_continue:
+- call __camellia_dec_blk32;
++ pax_direct_call __camellia_dec_blk32;
+
+ vmovdqu %ymm7, (%rax);
+ vpxor %ymm7, %ymm7, %ymm7;
+@@ -1057,7 +1058,7 @@ ENTRY(camellia_cbc_dec_32way)
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_cbc_dec_32way;
ENDPROC(camellia_cbc_dec_32way)
-@@ -1074,7 +1082,7 @@ ENDPROC(camellia_cbc_dec_32way)
+ #define inc_le128(x, minus_one, tmp) \
+@@ -1074,7 +1075,7 @@ ENDPROC(camellia_cbc_dec_32way)
vpslldq $8, tmp1, tmp1; \
vpsubq tmp1, x, x;
@@ -14515,20 +15310,39 @@ index 16186c1..3468f83 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
-@@ -1197,6 +1205,7 @@ ENTRY(camellia_ctr_32way)
+@@ -1170,7 +1171,7 @@ ENTRY(camellia_ctr_32way)
+ vpxor 14 * 32(%rax), %ymm15, %ymm14;
+ vpxor 15 * 32(%rax), %ymm15, %ymm15;
+
+- call __camellia_enc_blk32;
++ pax_direct_call __camellia_enc_blk32;
+
+ movq %r10, %rsp;
+
+@@ -1197,7 +1198,7 @@ ENTRY(camellia_ctr_32way)
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_ctr_32way;
ENDPROC(camellia_ctr_32way)
-@@ -1364,10 +1373,11 @@ camellia_xts_crypt_32way:
+ #define gf128mul_x_ble(iv, mask, tmp) \
+@@ -1337,7 +1338,7 @@ camellia_xts_crypt_32way:
+ vpxor 14 * 32(%rax), %ymm15, %ymm14;
+ vpxor 15 * 32(%rax), %ymm15, %ymm15;
+
+- call *%r9;
++ pax_indirect_call "%r9", __camellia_enc_blk32;
+
+ addq $(16 * 32), %rsp;
+
+@@ -1364,10 +1365,10 @@ camellia_xts_crypt_32way:
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_xts_crypt_32way;
ENDPROC(camellia_xts_crypt_32way)
-ENTRY(camellia_xts_enc_32way)
@@ -14536,7 +15350,7 @@ index 16186c1..3468f83 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
-@@ -1382,7 +1392,7 @@ ENTRY(camellia_xts_enc_32way)
+@@ -1382,7 +1383,7 @@ ENTRY(camellia_xts_enc_32way)
jmp camellia_xts_crypt_32way;
ENDPROC(camellia_xts_enc_32way)
@@ -14546,7 +15360,7 @@ index 16186c1..3468f83 100644
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
-index 310319c..9253a8f 100644
+index 310319c..4fa639a 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -21,6 +21,7 @@
@@ -14557,19 +15371,19 @@ index 310319c..9253a8f 100644
.file "camellia-x86_64-asm_64.S"
.text
-@@ -228,16 +229,18 @@ ENTRY(__camellia_enc_blk)
+@@ -228,16 +229,16 @@ ENTRY(__camellia_enc_blk)
enc_outunpack(mov, RT1);
movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_enc_blk;
.L__enc_xor:
enc_outunpack(xor, RT1);
movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_enc_blk;
ENDPROC(__camellia_enc_blk)
-ENTRY(camellia_dec_blk)
@@ -14577,28 +15391,29 @@ index 310319c..9253a8f 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
+@@ -272,7 +273,7 @@ ENTRY(camellia_dec_blk)
dec_outunpack();
movq RRBP, %rbp;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_dec_blk;
ENDPROC(camellia_dec_blk)
-@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
+ /**********************************************************************
+@@ -463,17 +464,17 @@ ENTRY(__camellia_enc_blk_2way)
movq RRBP, %rbp;
popq %rbx;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_enc_blk_2way;
.L__enc2_xor:
-@@ -470,10 +475,11 @@ ENTRY(__camellia_enc_blk_2way)
+ enc_outunpack2(xor, RT2);
movq RRBP, %rbp;
popq %rbx;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __camellia_enc_blk_2way;
ENDPROC(__camellia_enc_blk_2way)
-ENTRY(camellia_dec_blk_2way)
@@ -14606,18 +15421,18 @@ index 310319c..9253a8f 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
+@@ -510,5 +511,5 @@ ENTRY(camellia_dec_blk_2way)
movq RRBP, %rbp;
movq RXOR, %rbx;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret camellia_dec_blk_2way;
ENDPROC(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
-index 60907c1..fe8638d 100644
+index 60907c1..3fc99c4 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
-@@ -27,20 +27,20 @@
+@@ -27,20 +27,22 @@
#define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
/* 32-way AVX2/AES-NI parallel cipher functions */
@@ -14627,6 +15442,8 @@ index 60907c1..fe8638d 100644
-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
+asmlinkage void camellia_ecb_dec_32way(void *ctx, u8 *dst,
const u8 *src);
++void __camellia_enc_blk32(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __camellia_dec_blk32(void *ctx, u8 *dst, const u8 *src) __rap_hash;
-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
+asmlinkage void camellia_cbc_dec_32way(void *ctx, u8 *dst,
@@ -14724,7 +15541,7 @@ index aa76cad..ffd8808 100644
camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
-index 14fa196..5de8a4a 100644
+index 14fa196..80d99b6 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -25,6 +25,7 @@
@@ -14735,23 +15552,24 @@ index 14fa196..5de8a4a 100644
.file "cast5-avx-x86_64-asm_64.S"
-@@ -282,6 +283,7 @@ __cast5_enc_blk16:
+@@ -282,7 +283,7 @@ __cast5_enc_blk16:
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __cast5_enc_blk16;
ENDPROC(__cast5_enc_blk16)
-@@ -353,6 +355,7 @@ __cast5_dec_blk16:
+ .align 16
+@@ -353,14 +354,14 @@ __cast5_dec_blk16:
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __cast5_dec_blk16;
.L__skip_dec:
-@@ -360,7 +363,7 @@ __cast5_dec_blk16:
+ vpsrldq $4, RKR, RKR;
jmp .L__dec_tail;
ENDPROC(__cast5_dec_blk16)
@@ -14760,12 +15578,21 @@ index 14fa196..5de8a4a 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -391,10 +394,11 @@ ENTRY(cast5_ecb_enc_16way)
+@@ -379,7 +380,7 @@ ENTRY(cast5_ecb_enc_16way)
+ vmovdqu (6*4*4)(%rdx), RL4;
+ vmovdqu (7*4*4)(%rdx), RR4;
+
+- call __cast5_enc_blk16;
++ pax_direct_call __cast5_enc_blk16;
+
+ vmovdqu RR1, (0*4*4)(%r11);
+ vmovdqu RL1, (1*4*4)(%r11);
+@@ -391,10 +392,10 @@ ENTRY(cast5_ecb_enc_16way)
vmovdqu RL4, (7*4*4)(%r11);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast5_ecb_enc_16way;
ENDPROC(cast5_ecb_enc_16way)
-ENTRY(cast5_ecb_dec_16way)
@@ -14773,15 +15600,25 @@ index 14fa196..5de8a4a 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -425,6 +429,7 @@ ENTRY(cast5_ecb_dec_16way)
+@@ -413,7 +414,7 @@ ENTRY(cast5_ecb_dec_16way)
+ vmovdqu (6*4*4)(%rdx), RL4;
+ vmovdqu (7*4*4)(%rdx), RR4;
+
+- call __cast5_dec_blk16;
++ pax_direct_call __cast5_dec_blk16;
+
+ vmovdqu RR1, (0*4*4)(%r11);
+ vmovdqu RL1, (1*4*4)(%r11);
+@@ -425,7 +426,7 @@ ENTRY(cast5_ecb_dec_16way)
vmovdqu RL4, (7*4*4)(%r11);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast5_ecb_dec_16way;
ENDPROC(cast5_ecb_dec_16way)
-@@ -436,10 +441,10 @@ ENTRY(cast5_cbc_dec_16way)
+ ENTRY(cast5_cbc_dec_16way)
+@@ -436,10 +437,10 @@ ENTRY(cast5_cbc_dec_16way)
*/
FRAME_BEGIN
@@ -14794,8 +15631,12 @@ index 14fa196..5de8a4a 100644
vmovdqu (0*16)(%rdx), RL1;
vmovdqu (1*16)(%rdx), RR1;
-@@ -453,16 +458,16 @@ ENTRY(cast5_cbc_dec_16way)
- call __cast5_dec_blk16;
+@@ -450,19 +451,19 @@ ENTRY(cast5_cbc_dec_16way)
+ vmovdqu (6*16)(%rdx), RL4;
+ vmovdqu (7*16)(%rdx), RR4;
+
+- call __cast5_dec_blk16;
++ pax_direct_call __cast5_dec_blk16;
/* xor with src */
- vmovq (%r12), RX;
@@ -14819,7 +15660,7 @@ index 14fa196..5de8a4a 100644
vmovdqu RR1, (0*16)(%r11);
vmovdqu RL1, (1*16)(%r11);
-@@ -473,9 +478,10 @@ ENTRY(cast5_cbc_dec_16way)
+@@ -473,10 +474,10 @@ ENTRY(cast5_cbc_dec_16way)
vmovdqu RR4, (6*16)(%r11);
vmovdqu RL4, (7*16)(%r11);
@@ -14827,11 +15668,12 @@ index 14fa196..5de8a4a 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast5_cbc_dec_16way;
ENDPROC(cast5_cbc_dec_16way)
-@@ -488,10 +494,10 @@ ENTRY(cast5_ctr_16way)
+ ENTRY(cast5_ctr_16way)
+@@ -488,10 +489,10 @@ ENTRY(cast5_ctr_16way)
*/
FRAME_BEGIN
@@ -14844,8 +15686,12 @@ index 14fa196..5de8a4a 100644
vpcmpeqd RTMP, RTMP, RTMP;
vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
-@@ -531,14 +537,14 @@ ENTRY(cast5_ctr_16way)
- call __cast5_enc_blk16;
+@@ -528,17 +529,17 @@ ENTRY(cast5_ctr_16way)
+ vpshufb R1ST, RX, RX; /* be: IV16, IV16 */
+ vmovq RX, (%rcx);
+
+- call __cast5_enc_blk16;
++ pax_direct_call __cast5_enc_blk16;
/* dst = src ^ iv */
- vpxor (0*16)(%r12), RR1, RR1;
@@ -14867,7 +15713,7 @@ index 14fa196..5de8a4a 100644
vmovdqu RR1, (0*16)(%r11);
vmovdqu RL1, (1*16)(%r11);
vmovdqu RR2, (2*16)(%r11);
-@@ -548,8 +554,9 @@ ENTRY(cast5_ctr_16way)
+@@ -548,8 +549,8 @@ ENTRY(cast5_ctr_16way)
vmovdqu RR4, (6*16)(%r11);
vmovdqu RL4, (7*16)(%r11);
@@ -14875,11 +15721,24 @@ index 14fa196..5de8a4a 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast5_ctr_16way;
ENDPROC(cast5_ctr_16way)
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 8648158..b56922a 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -44,6 +44,8 @@ asmlinkage void cast5_cbc_dec_16way(struct cast5_ctx *ctx, u8 *dst,
+ const u8 *src);
+ asmlinkage void cast5_ctr_16way(struct cast5_ctx *ctx, u8 *dst, const u8 *src,
+ __be64 *iv);
++void __cast5_enc_blk16(struct cast5_ctx *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __cast5_dec_blk16(struct cast5_ctx *ctx, u8 *dst, const u8 *src) __rap_hash;
+
+ static inline bool cast5_fpu_begin(bool fpu_enabled, unsigned int nbytes)
+ {
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
-index c419389..b853452 100644
+index c419389..7e2ed7c 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -25,6 +25,7 @@
@@ -14890,20 +15749,21 @@ index c419389..b853452 100644
#include "glue_helper-asm-avx.S"
.file "cast6-avx-x86_64-asm_64.S"
-@@ -296,6 +297,7 @@ __cast6_enc_blk8:
+@@ -296,7 +297,7 @@ __cast6_enc_blk8:
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __cast6_enc_blk8;
ENDPROC(__cast6_enc_blk8)
-@@ -341,10 +343,11 @@ __cast6_dec_blk8:
+ .align 8
+@@ -341,10 +342,10 @@ __cast6_dec_blk8:
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __cast6_dec_blk8;
ENDPROC(__cast6_dec_blk8)
-ENTRY(cast6_ecb_enc_8way)
@@ -14911,12 +15771,18 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -361,10 +364,11 @@ ENTRY(cast6_ecb_enc_8way)
+@@ -356,15 +357,15 @@ ENTRY(cast6_ecb_enc_8way)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __cast6_enc_blk8;
++ pax_direct_call __cast6_enc_blk8;
+
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_ecb_enc_8way;
ENDPROC(cast6_ecb_enc_8way)
-ENTRY(cast6_ecb_dec_8way)
@@ -14924,12 +15790,18 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -381,10 +385,11 @@ ENTRY(cast6_ecb_dec_8way)
+@@ -376,15 +377,15 @@ ENTRY(cast6_ecb_dec_8way)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __cast6_dec_blk8;
++ pax_direct_call __cast6_dec_blk8;
+
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_ecb_dec_8way;
ENDPROC(cast6_ecb_dec_8way)
-ENTRY(cast6_cbc_dec_8way)
@@ -14937,7 +15809,7 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -392,24 +397,25 @@ ENTRY(cast6_cbc_dec_8way)
+@@ -392,24 +393,24 @@ ENTRY(cast6_cbc_dec_8way)
*/
FRAME_BEGIN
@@ -14950,7 +15822,8 @@ index c419389..b853452 100644
load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
- call __cast6_dec_blk8;
+- call __cast6_dec_blk8;
++ pax_direct_call __cast6_dec_blk8;
- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -14959,8 +15832,8 @@ index c419389..b853452 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_cbc_dec_8way;
ENDPROC(cast6_cbc_dec_8way)
-ENTRY(cast6_ctr_8way)
@@ -14968,7 +15841,7 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -418,25 +424,26 @@ ENTRY(cast6_ctr_8way)
+@@ -418,25 +419,25 @@ ENTRY(cast6_ctr_8way)
*/
FRAME_BEGIN
@@ -14982,7 +15855,8 @@ index c419389..b853452 100644
load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
RD2, RX, RKR, RKM);
- call __cast6_enc_blk8;
+- call __cast6_enc_blk8;
++ pax_direct_call __cast6_enc_blk8;
- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -14991,8 +15865,8 @@ index c419389..b853452 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_ctr_8way;
ENDPROC(cast6_ctr_8way)
-ENTRY(cast6_xts_enc_8way)
@@ -15000,12 +15874,19 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -457,10 +464,11 @@ ENTRY(cast6_xts_enc_8way)
+@@ -451,16 +452,16 @@ ENTRY(cast6_xts_enc_8way)
+ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+ RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);
+
+- call __cast6_enc_blk8;
++ pax_direct_call __cast6_enc_blk8;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_xts_enc_8way;
ENDPROC(cast6_xts_enc_8way)
-ENTRY(cast6_xts_dec_8way)
@@ -15013,18 +15894,25 @@ index c419389..b853452 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -481,5 +489,6 @@ ENTRY(cast6_xts_dec_8way)
+@@ -475,11 +476,11 @@ ENTRY(cast6_xts_dec_8way)
+ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+ RX, RKR, RKM, .Lxts_gf128mul_and_shl1_mask);
+
+- call __cast6_dec_blk8;
++ pax_direct_call __cast6_dec_blk8;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret cast6_xts_dec_8way;
ENDPROC(cast6_xts_dec_8way)
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
-index 50e6847..bf7c2d8 100644
+index 50e6847..593d632 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
-@@ -41,20 +41,20 @@
+@@ -41,20 +41,23 @@
#define CAST6_PARALLEL_BLOCKS 8
@@ -15050,14 +15938,72 @@ index 50e6847..bf7c2d8 100644
+ const u128 *src, le128 *iv);
+asmlinkage void cast6_xts_dec_8way(void *ctx, u128 *dst,
+ const u128 *src, le128 *iv);
++
++void __cast6_enc_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __cast6_dec_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash;
static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
{
+diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
+index 16694e6..4675b5e 100644
+--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
++++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 32
+@@ -439,5 +440,5 @@ ENTRY(chacha20_8block_xor_avx2)
+
+ vzeroupper
+ mov %r8,%rsp
+- ret
++ pax_ret chacha20_8block_xor_avx2
+ ENDPROC(chacha20_8block_xor_avx2)
+diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
+index 3a33124..ba21c6f 100644
+--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
++++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 16
+@@ -139,7 +140,7 @@ ENTRY(chacha20_block_xor_ssse3)
+ pxor %xmm7,%xmm3
+ movdqu %xmm3,0x30(%rsi)
+
+- ret
++ pax_ret chacha20_block_xor_ssse3
+ ENDPROC(chacha20_block_xor_ssse3)
+
+ ENTRY(chacha20_4block_xor_ssse3)
+@@ -623,5 +624,5 @@ ENTRY(chacha20_4block_xor_ssse3)
+ movdqu %xmm15,0xf0(%rsi)
+
+ mov %r11,%rsp
+- ret
++ pax_ret chacha20_4block_xor_ssse3
+ ENDPROC(chacha20_4block_xor_ssse3)
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
-index f247304..b500391 100644
+index f247304..d253bd1 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
-@@ -102,6 +102,12 @@
+@@ -39,6 +39,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+
+ .align 16
+@@ -102,6 +103,12 @@
* size_t len, uint crc32)
*/
@@ -15070,7 +16016,7 @@ index f247304..b500391 100644
ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
movdqa (BUF), %xmm1
movdqa 0x10(BUF), %xmm2
-@@ -113,9 +119,8 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
+@@ -113,9 +120,8 @@ ENTRY(crc32_pclmul_le_16) /* buffer and buffer size are 16 bytes aligned */
add $0x40, BUF
#ifndef __x86_64__
/* This is for position independent code(-fPIC) support for 32bit */
@@ -15081,7 +16027,7 @@ index f247304..b500391 100644
#endif
cmp $0x40, LEN
jb less_64
-@@ -123,7 +128,7 @@ delta:
+@@ -123,7 +129,7 @@ delta:
#ifdef __x86_64__
movdqa .Lconstant_R2R1(%rip), CONSTANT
#else
@@ -15090,7 +16036,7 @@ index f247304..b500391 100644
#endif
loop_64:/* 64 bytes Full cache line folding */
-@@ -172,7 +177,7 @@ less_64:/* Folding cache line into 128bit */
+@@ -172,7 +178,7 @@ less_64:/* Folding cache line into 128bit */
#ifdef __x86_64__
movdqa .Lconstant_R4R3(%rip), CONSTANT
#else
@@ -15099,7 +16045,7 @@ index f247304..b500391 100644
#endif
prefetchnta (BUF)
-@@ -220,8 +225,8 @@ fold_64:
+@@ -220,8 +226,8 @@ fold_64:
movdqa .Lconstant_R5(%rip), CONSTANT
movdqa .Lconstant_mask32(%rip), %xmm3
#else
@@ -15110,7 +16056,7 @@ index f247304..b500391 100644
#endif
psrldq $0x04, %xmm2
pand %xmm3, %xmm1
-@@ -232,7 +237,7 @@ fold_64:
+@@ -232,7 +238,7 @@ fold_64:
#ifdef __x86_64__
movdqa .Lconstant_RUpoly(%rip), CONSTANT
#else
@@ -15119,8 +16065,15 @@ index f247304..b500391 100644
#endif
movdqa %xmm1, %xmm2
pand %xmm3, %xmm1
+@@ -242,5 +248,5 @@ fold_64:
+ pxor %xmm2, %xmm1
+ PEXTRD 0x01, %xmm1, %eax
+
+- ret
++ pax_ret crc32_pclmul_le_16
+ ENDPROC(crc32_pclmul_le_16)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
-index dc05f010..23c8bfd 100644
+index dc05f010..83302a8 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
@@ -15131,16 +16084,68 @@ index dc05f010..23c8bfd 100644
## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
-@@ -309,6 +310,7 @@ do_return:
+@@ -309,7 +310,7 @@ do_return:
popq %rsi
popq %rdi
popq %rbx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret crc_pcl
ENDPROC(crc_pcl)
+ .section .rodata, "a", %progbits
+diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
+index 35e9756..5048353 100644
+--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
++++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
+@@ -59,6 +59,7 @@
+ #
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -367,7 +368,7 @@ _cleanup:
+ # scale the result back to 16 bits
+ shr $16, %eax
+ mov %rcx, %rsp
+- ret
++ pax_ret crc_t10dif_pcl
+
+ ########################################################################
+
+diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
+index 038f6ae..ec7142bf 100644
+--- a/arch/x86/crypto/des3_ede-asm_64.S
++++ b/arch/x86/crypto/des3_ede-asm_64.S
+@@ -15,6 +15,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "des3_ede-asm_64.S"
+ .text
+@@ -250,7 +251,7 @@ ENTRY(des3_ede_x86_64_crypt_blk)
+ popq %rbx;
+ popq %rbp;
+
+- ret;
++ pax_ret des3_ede_x86_64_crypt_blk;
+ ENDPROC(des3_ede_x86_64_crypt_blk)
+
+ /***********************************************************************
+@@ -534,7 +535,7 @@ ENTRY(des3_ede_x86_64_crypt_blk_3way)
+ popq %rbx;
+ popq %rbp;
+
+- ret;
++ pax_ret des3_ede_x86_64_crypt_blk_3way;
+ ENDPROC(des3_ede_x86_64_crypt_blk_3way)
+
+ .data
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
-index eed55c8..b354187 100644
+index eed55c8..18f64dc 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -19,6 +19,7 @@
@@ -15151,29 +16156,57 @@ index eed55c8..b354187 100644
.data
-@@ -90,6 +91,7 @@ __clmul_gf128mul_ble:
+@@ -90,7 +91,7 @@ __clmul_gf128mul_ble:
psrlq $1, T2
pxor T2, T1
pxor T1, DATA
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret __clmul_gf128mul_ble
ENDPROC(__clmul_gf128mul_ble)
-@@ -104,6 +106,7 @@ ENTRY(clmul_ghash_mul)
+ /* void clmul_ghash_mul(char *dst, const u128 *shash) */
+@@ -100,11 +101,11 @@ ENTRY(clmul_ghash_mul)
+ movups (%rsi), SHASH
+ movaps .Lbswap_mask, BSWAP
+ PSHUFB_XMM BSWAP DATA
+- call __clmul_gf128mul_ble
++ pax_direct_call __clmul_gf128mul_ble
PSHUFB_XMM BSWAP DATA
movups DATA, (%rdi)
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret clmul_ghash_mul
ENDPROC(clmul_ghash_mul)
-@@ -133,5 +136,6 @@ ENTRY(clmul_ghash_update)
+ /*
+@@ -124,7 +125,7 @@ ENTRY(clmul_ghash_update)
+ movups (%rsi), IN1
+ PSHUFB_XMM BSWAP IN1
+ pxor IN1, DATA
+- call __clmul_gf128mul_ble
++ pax_direct_call __clmul_gf128mul_ble
+ sub $16, %rdx
+ add $16, %rsi
+ cmp $16, %rdx
+@@ -133,5 +134,5 @@ ENTRY(clmul_ghash_update)
movups DATA, (%rdi)
.Lupdate_just_ret:
FRAME_END
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret clmul_ghash_update
ENDPROC(clmul_ghash_update)
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 0420bab..590ca78 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -26,6 +26,7 @@
+ #define GHASH_DIGEST_SIZE 16
+
+ void clmul_ghash_mul(char *dst, const u128 *shash);
++void __clmul_gf128mul_ble(char *dst, const u128 *shash) __rap_hash;
+
+ void clmul_ghash_update(char *dst, const char *src, unsigned int srclen,
+ const u128 *shash);
diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 6a85598..fed2ada 100644
--- a/arch/x86/crypto/glue_helper.c
@@ -15187,8 +16220,92 @@ index 6a85598..fed2ada 100644
nbytes -= bsize;
if (nbytes < bsize)
+diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
+index eff2f41..932718e 100644
+--- a/arch/x86/crypto/poly1305-avx2-x86_64.S
++++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 32
+@@ -382,5 +383,5 @@ ENTRY(poly1305_4block_avx2)
+ pop %r13
+ pop %r12
+ pop %rbx
+- ret
++ pax_ret poly1305_4block_avx2
+ ENDPROC(poly1305_4block_avx2)
+diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
+index 338c748..497359c 100644
+--- a/arch/x86/crypto/poly1305-sse2-x86_64.S
++++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 16
+@@ -273,7 +274,7 @@ ENTRY(poly1305_block_sse2)
+ add $0x10,%rsp
+ pop %r12
+ pop %rbx
+- ret
++ pax_ret poly1305_block_sse2
+ ENDPROC(poly1305_block_sse2)
+
+
+@@ -578,5 +579,5 @@ ENTRY(poly1305_2block_sse2)
+ pop %r13
+ pop %r12
+ pop %rbx
+- ret
++ pax_ret poly1305_2block_sse2
+ ENDPROC(poly1305_2block_sse2)
+diff --git a/arch/x86/crypto/salsa20-i586-asm_32.S b/arch/x86/crypto/salsa20-i586-asm_32.S
+index 329452b8..f136500 100644
+--- a/arch/x86/crypto/salsa20-i586-asm_32.S
++++ b/arch/x86/crypto/salsa20-i586-asm_32.S
+@@ -3,6 +3,7 @@
+ # Public domain.
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -924,7 +925,7 @@ ENTRY(salsa20_encrypt_bytes)
+ movl 96(%esp),%ebp
+ # leave
+ add %eax,%esp
+- ret
++ pax_ret salsa20_encrypt_bytes
+ ._bytesatleast65:
+ # bytes -= 64
+ sub $64,%ebx
+@@ -1059,7 +1060,7 @@ ENTRY(salsa20_keysetup)
+ movl 80(%esp),%ebp
+ # leave
+ add %eax,%esp
+- ret
++ pax_ret salsa20_keysetup
+ ENDPROC(salsa20_keysetup)
+
+ # enter salsa20_ivsetup
+@@ -1110,5 +1111,5 @@ ENTRY(salsa20_ivsetup)
+ movl 80(%esp),%ebp
+ # leave
+ add %eax,%esp
+- ret
++ pax_ret salsa20_ivsetup
+ ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
-index 9279e0b..c4b3d2c 100644
+index 9279e0b..6745d48 100644
--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
@@ -1,4 +1,5 @@
@@ -15197,31 +16314,33 @@ index 9279e0b..c4b3d2c 100644
# enter salsa20_encrypt_bytes
ENTRY(salsa20_encrypt_bytes)
-@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
+@@ -789,7 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret salsa20_encrypt_bytes
# bytesatleast65:
._bytesatleast65:
-@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
+ # bytes -= 64
+@@ -889,7 +890,7 @@ ENTRY(salsa20_keysetup)
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret salsa20_keysetup
ENDPROC(salsa20_keysetup)
-@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
+ # enter salsa20_ivsetup
+@@ -914,5 +915,5 @@ ENTRY(salsa20_ivsetup)
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret salsa20_ivsetup
ENDPROC(salsa20_ivsetup)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
-index 8be5718..d2bcbcd 100644
+index 8be5718..c5a9956 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -25,6 +25,7 @@
@@ -15232,20 +16351,21 @@ index 8be5718..d2bcbcd 100644
#include "glue_helper-asm-avx.S"
.file "serpent-avx-x86_64-asm_64.S"
-@@ -619,6 +620,7 @@ __serpent_enc_blk8_avx:
+@@ -619,7 +620,7 @@ __serpent_enc_blk8_avx:
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_enc_blk8_avx;
ENDPROC(__serpent_enc_blk8_avx)
-@@ -673,10 +675,11 @@ __serpent_dec_blk8_avx:
+ .align 8
+@@ -673,10 +674,10 @@ __serpent_dec_blk8_avx:
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_dec_blk8_avx;
ENDPROC(__serpent_dec_blk8_avx)
-ENTRY(serpent_ecb_enc_8way_avx)
@@ -15253,12 +16373,18 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -691,10 +694,11 @@ ENTRY(serpent_ecb_enc_8way_avx)
+@@ -686,15 +687,15 @@ ENTRY(serpent_ecb_enc_8way_avx)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_enc_blk8_avx;
++ pax_direct_call __serpent_enc_blk8_avx;
+
store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ecb_enc_8way_avx;
ENDPROC(serpent_ecb_enc_8way_avx)
-ENTRY(serpent_ecb_dec_8way_avx)
@@ -15266,12 +16392,18 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -709,10 +713,11 @@ ENTRY(serpent_ecb_dec_8way_avx)
+@@ -704,15 +705,15 @@ ENTRY(serpent_ecb_dec_8way_avx)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_dec_blk8_avx;
++ pax_direct_call __serpent_dec_blk8_avx;
+
store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ecb_dec_8way_avx;
ENDPROC(serpent_ecb_dec_8way_avx)
-ENTRY(serpent_cbc_dec_8way_avx)
@@ -15279,12 +16411,18 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -727,10 +732,11 @@ ENTRY(serpent_cbc_dec_8way_avx)
+@@ -722,15 +723,15 @@ ENTRY(serpent_cbc_dec_8way_avx)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_dec_blk8_avx;
++ pax_direct_call __serpent_dec_blk8_avx;
+
store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_cbc_dec_8way_avx;
ENDPROC(serpent_cbc_dec_8way_avx)
-ENTRY(serpent_ctr_8way_avx)
@@ -15292,12 +16430,18 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -747,10 +753,11 @@ ENTRY(serpent_ctr_8way_avx)
+@@ -742,15 +743,15 @@ ENTRY(serpent_ctr_8way_avx)
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RK0, RK1, RK2);
+
+- call __serpent_enc_blk8_avx;
++ pax_direct_call __serpent_enc_blk8_avx;
+
store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ctr_8way_avx;
ENDPROC(serpent_ctr_8way_avx)
-ENTRY(serpent_xts_enc_8way_avx)
@@ -15305,12 +16449,19 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -769,10 +776,11 @@ ENTRY(serpent_xts_enc_8way_avx)
+@@ -763,16 +764,16 @@ ENTRY(serpent_xts_enc_8way_avx)
+ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+ RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
+
+- call __serpent_enc_blk8_avx;
++ pax_direct_call __serpent_enc_blk8_avx;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_xts_enc_8way_avx;
ENDPROC(serpent_xts_enc_8way_avx)
-ENTRY(serpent_xts_dec_8way_avx)
@@ -15318,15 +16469,22 @@ index 8be5718..d2bcbcd 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -791,5 +799,6 @@ ENTRY(serpent_xts_dec_8way_avx)
+@@ -785,11 +786,11 @@ ENTRY(serpent_xts_dec_8way_avx)
+ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+ RK0, RK1, RK2, .Lxts_gf128mul_and_shl1_mask);
+
+- call __serpent_dec_blk8_avx;
++ pax_direct_call __serpent_dec_blk8_avx;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_xts_dec_8way_avx;
ENDPROC(serpent_xts_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
-index 97c48ad..25416de 100644
+index 97c48ad..541b03c 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -16,6 +16,7 @@
@@ -15337,20 +16495,21 @@ index 97c48ad..25416de 100644
#include "glue_helper-asm-avx2.S"
.file "serpent-avx2-asm_64.S"
-@@ -611,6 +612,7 @@ __serpent_enc_blk16:
+@@ -611,7 +612,7 @@ __serpent_enc_blk16:
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_enc_blk16;
ENDPROC(__serpent_enc_blk16)
-@@ -665,10 +667,11 @@ __serpent_dec_blk16:
+ .align 8
+@@ -665,10 +666,10 @@ __serpent_dec_blk16:
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_dec_blk16;
ENDPROC(__serpent_dec_blk16)
-ENTRY(serpent_ecb_enc_16way)
@@ -15358,12 +16517,20 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -687,10 +690,11 @@ ENTRY(serpent_ecb_enc_16way)
+@@ -680,17 +681,17 @@ ENTRY(serpent_ecb_enc_16way)
+
+ load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_enc_blk16;
++ pax_direct_call __serpent_enc_blk16;
+
+ store_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ecb_enc_16way;
ENDPROC(serpent_ecb_enc_16way)
-ENTRY(serpent_ecb_dec_16way)
@@ -15371,12 +16538,20 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -709,10 +713,11 @@ ENTRY(serpent_ecb_dec_16way)
+@@ -702,17 +703,17 @@ ENTRY(serpent_ecb_dec_16way)
+
+ load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_dec_blk16;
++ pax_direct_call __serpent_dec_blk16;
+
+ store_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ecb_dec_16way;
ENDPROC(serpent_ecb_dec_16way)
-ENTRY(serpent_cbc_dec_16way)
@@ -15384,12 +16559,21 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -732,10 +737,11 @@ ENTRY(serpent_cbc_dec_16way)
+@@ -724,7 +725,7 @@ ENTRY(serpent_cbc_dec_16way)
+
+ load_16way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __serpent_dec_blk16;
++ pax_direct_call __serpent_dec_blk16;
+
+ store_cbc_16way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2,
+ RK0);
+@@ -732,10 +733,10 @@ ENTRY(serpent_cbc_dec_16way)
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_cbc_dec_16way;
ENDPROC(serpent_cbc_dec_16way)
-ENTRY(serpent_ctr_16way)
@@ -15397,12 +16581,20 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -757,10 +763,11 @@ ENTRY(serpent_ctr_16way)
+@@ -750,17 +751,17 @@ ENTRY(serpent_ctr_16way)
+ RD2, RK0, RK0x, RK1, RK1x, RK2, RK2x, RK3, RK3x, RNOT,
+ tp);
+
+- call __serpent_enc_blk16;
++ pax_direct_call __serpent_enc_blk16;
+
+ store_ctr_16way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_ctr_16way;
ENDPROC(serpent_ctr_16way)
-ENTRY(serpent_xts_enc_16way)
@@ -15410,12 +16602,20 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -783,10 +790,11 @@ ENTRY(serpent_xts_enc_16way)
+@@ -776,17 +777,17 @@ ENTRY(serpent_xts_enc_16way)
+ .Lxts_gf128mul_and_shl1_mask_0,
+ .Lxts_gf128mul_and_shl1_mask_1);
+
+- call __serpent_enc_blk16;
++ pax_direct_call __serpent_enc_blk16;
+
+ store_xts_16way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_xts_enc_16way;
ENDPROC(serpent_xts_enc_16way)
-ENTRY(serpent_xts_dec_16way)
@@ -15423,15 +16623,57 @@ index 97c48ad..25416de 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
-@@ -809,5 +817,6 @@ ENTRY(serpent_xts_dec_16way)
+@@ -802,12 +803,12 @@ ENTRY(serpent_xts_dec_16way)
+ .Lxts_gf128mul_and_shl1_mask_0,
+ .Lxts_gf128mul_and_shl1_mask_1);
+
+- call __serpent_dec_blk16;
++ pax_direct_call __serpent_dec_blk16;
+
+ store_xts_16way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
vzeroupper;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_xts_dec_16way;
ENDPROC(serpent_xts_dec_16way)
+diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+index d348f15..48aa0c3 100644
+--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
++++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+@@ -25,6 +25,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "serpent-sse2-i586-asm_32.S"
+ .text
+@@ -568,12 +569,12 @@ ENTRY(__serpent_enc_blk_4way)
+
+ write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+- ret;
++ pax_ret __serpent_enc_blk_4way;
+
+ .L__enc_xor4:
+ xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
+
+- ret;
++ pax_ret __serpent_enc_blk_4way;
+ ENDPROC(__serpent_enc_blk_4way)
+
+ ENTRY(serpent_dec_blk_4way)
+@@ -627,5 +628,5 @@ ENTRY(serpent_dec_blk_4way)
+ movl arg_dst(%esp), %eax;
+ write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
+
+- ret;
++ pax_ret serpent_dec_blk_4way;
+ ENDPROC(serpent_dec_blk_4way)
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
-index acc066c..1559cc4 100644
+index acc066c..d96c7c2 100644
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -25,6 +25,7 @@
@@ -15442,33 +16684,34 @@ index acc066c..1559cc4 100644
.file "serpent-sse2-x86_64-asm_64.S"
.text
-@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
+@@ -690,13 +691,13 @@ ENTRY(__serpent_enc_blk_8way)
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_enc_blk_8way;
.L__enc_xor8:
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __serpent_enc_blk_8way;
ENDPROC(__serpent_enc_blk_8way)
-@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
+ ENTRY(serpent_dec_blk_8way)
+@@ -750,5 +751,5 @@ ENTRY(serpent_dec_blk_8way)
write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret serpent_dec_blk_8way;
ENDPROC(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
-index 870f6d8..9fed18e 100644
+index 870f6d8..aaf38de 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
-@@ -27,18 +27,18 @@
+@@ -27,18 +27,20 @@
#define SERPENT_AVX2_PARALLEL_BLOCKS 16
/* 16-way AVX2 parallel cipher functions */
@@ -15480,6 +16723,8 @@ index 870f6d8..9fed18e 100644
const u8 *src);
-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
+asmlinkage void serpent_cbc_dec_16way(void *ctx, u8 *dst, const u8 *src);
++void __serpent_enc_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __serpent_dec_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash;
asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
@@ -15552,11 +16797,30 @@ index 644f97a..4d069a1 100644
u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
unsigned int j;
+diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+index 08ad1a9..293bc9e 100644
+--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
++++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+@@ -106,5 +106,6 @@ struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
+ struct job_sha1 *job);
+ struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
+ struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
++struct job_sha1 *sha1_x8_avx2(struct sha1_mb_mgr *state) __rap_hash;
+
+ #endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
-index 96df6a3..8519a8f 100644
+index 96df6a3..f5f561f 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
-@@ -103,7 +103,7 @@ offset = \_offset
+@@ -53,6 +53,7 @@
+ */
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha1_mb_mgr_datastruct.S"
+
+
+@@ -103,7 +104,7 @@ offset = \_offset
# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rcx : state
@@ -15565,7 +16829,25 @@ index 96df6a3..8519a8f 100644
FRAME_BEGIN
push %rbx
-@@ -226,7 +226,7 @@ ENDPROC(sha1_mb_mgr_flush_avx2)
+@@ -183,7 +184,7 @@ LABEL skip_ %I
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha1_x8_avx2
++ pax_direct_call sha1_x8_avx2
+ # state and idx are intact
+
+
+@@ -215,7 +216,7 @@ len_is_0:
+ return:
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha1_mb_mgr_flush_avx2
+
+ return_null:
+ xor job_rax, job_rax
+@@ -226,7 +227,7 @@ ENDPROC(sha1_mb_mgr_flush_avx2)
#################################################################
.align 16
@@ -15574,11 +16856,34 @@ index 96df6a3..8519a8f 100644
push %rbx
## if bit 32+3 is set, then all lanes are empty
+@@ -273,12 +274,12 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+
+ pop %rbx
+
+- ret
++ pax_ret sha1_mb_mgr_get_comp_job_avx2
+
+ .return_null:
+ xor job_rax, job_rax
+ pop %rbx
+- ret
++ pax_ret sha1_mb_mgr_get_comp_job_avx2
+ ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
+
+ .data
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
-index 63a0d9c..a6038fd 100644
+index 63a0d9c..53b60ac 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
-@@ -98,7 +98,7 @@ lane_data = %r10
+@@ -54,6 +54,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha1_mb_mgr_datastruct.S"
+
+
+@@ -98,7 +99,7 @@ lane_data = %r10
# JOB* submit_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
# arg 1 : rcx : state
# arg 2 : rdx : job
@@ -15587,8 +16892,89 @@ index 63a0d9c..a6038fd 100644
FRAME_BEGIN
push %rbx
push %r12
+@@ -163,7 +164,7 @@ start_loop:
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha1_x8_avx2
++ pax_direct_call sha1_x8_avx2
+
+ # state and idx are intact
+
+@@ -195,7 +196,7 @@ return:
+ pop %r12
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha1_mb_mgr_submit_avx2
+
+ return_null:
+ xor job_rax, job_rax
+diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+index c9dae1c..6055141 100644
+--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
++++ b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+@@ -53,6 +53,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "sha1_mb_mgr_datastruct.S"
+
+ ## code to compute oct SHA1 using SSE-256
+@@ -457,7 +458,7 @@ lloop:
+ pop %r13
+ pop %r12
+
+- ret
++ pax_ret sha1_x8_avx2
+ ENDPROC(sha1_x8_avx2)
+
+
+diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+index 1cd792d..2236003 100644
+--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
++++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+@@ -70,6 +70,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define CTX %rdi /* arg1 */
+ #define BUF %rsi /* arg2 */
+@@ -671,7 +672,7 @@ _loop3:
+ pop %rbp
+ pop %rbx
+
+- ret
++ pax_ret \name
+
+ ENDPROC(\name)
+ .endm
+diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
+index 874a651..aa3d201 100644
+--- a/arch/x86/crypto/sha1_ni_asm.S
++++ b/arch/x86/crypto/sha1_ni_asm.S
+@@ -54,6 +54,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define DIGEST_PTR %rdi /* 1st arg */
+ #define DATA_PTR %rsi /* 2nd arg */
+@@ -290,7 +291,7 @@ ENTRY(sha1_ni_transform)
+ .Ldone_hash:
+ mov RSPSAVE, %rsp
+
+- ret
++ pax_ret sha1_ni_transform
+ ENDPROC(sha1_ni_transform)
+
+ .data
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
-index a410950..02d2056 100644
+index a410950..f0fefc3 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -29,6 +29,7 @@
@@ -15617,7 +17003,7 @@ index a410950..02d2056 100644
sub $64, %rsp # allocate workspace
and $~15, %rsp # align stack
-@@ -99,11 +101,12 @@
+@@ -99,12 +101,12 @@
xor %rax, %rax
rep stosq
@@ -15628,10 +17014,11 @@ index a410950..02d2056 100644
+ pop %r14
pop %rbp
pop %rbx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret \name
ENDPROC(\name)
+ .endm
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index fc61739..03f7efe 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
@@ -15786,7 +17173,7 @@ index fc61739..03f7efe 100644
static int sha1_ni_final(struct shash_desc *desc, u8 *out)
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
-index 92b3b5d..8732479 100644
+index 92b3b5d..47aadd7 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -49,6 +49,7 @@
@@ -15807,16 +17194,17 @@ index 92b3b5d..8732479 100644
pushq %rbx
pushq %rbp
pushq %r13
-@@ -460,6 +460,7 @@ done_hash:
+@@ -460,7 +460,7 @@ done_hash:
popq %r13
popq %rbp
popq %rbx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha256_transform_avx
ENDPROC(sha256_transform_avx)
+ .data
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
-index 570ec5e..9bcfa25 100644
+index 570ec5e..6c7f33c 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -50,6 +50,7 @@
@@ -15837,19 +17225,39 @@ index 570ec5e..9bcfa25 100644
pushq %rbx
pushq %rbp
pushq %r12
-@@ -720,6 +720,7 @@ done_hash:
+@@ -720,7 +720,7 @@ done_hash:
popq %r12
popq %rbp
popq %rbx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha256_transform_rorx
ENDPROC(sha256_transform_rorx)
+ .data
+diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+index b01ae40..880e1d4 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
++++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+@@ -104,5 +104,6 @@ struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
+ struct job_sha256 *job);
+ struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
+ struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
++struct job_sha256 *sha256_x8_avx2(struct sha256_mb_mgr *state) __rap_hash;
+
+ #endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
-index a78a069..127cb66 100644
+index a78a069..3919641 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
-@@ -101,7 +101,7 @@ offset = \_offset
+@@ -52,6 +52,7 @@
+ */
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha256_mb_mgr_datastruct.S"
+
+ .extern sha256_x8_avx2
+@@ -101,7 +102,7 @@ offset = \_offset
# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rcx : state
@@ -15858,7 +17266,25 @@ index a78a069..127cb66 100644
FRAME_BEGIN
push %rbx
-@@ -225,7 +225,7 @@ ENDPROC(sha256_mb_mgr_flush_avx2)
+@@ -181,7 +182,7 @@ LABEL skip_ %I
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha256_x8_avx2
++ pax_direct_call sha256_x8_avx2
+ # state and idx are intact
+
+ len_is_0:
+@@ -215,7 +216,7 @@ len_is_0:
+ return:
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha256_mb_mgr_flush_avx2
+
+ return_null:
+ xor job_rax, job_rax
+@@ -225,7 +226,7 @@ ENDPROC(sha256_mb_mgr_flush_avx2)
##############################################################################
.align 16
@@ -15867,11 +17293,34 @@ index a78a069..127cb66 100644
push %rbx
## if bit 32+3 is set, then all lanes are empty
+@@ -276,12 +277,12 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
+
+ pop %rbx
+
+- ret
++ pax_ret sha256_mb_mgr_get_comp_job_avx2
+
+ .return_null:
+ xor job_rax, job_rax
+ pop %rbx
+- ret
++ pax_ret sha256_mb_mgr_get_comp_job_avx2
+ ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
+
+ .data
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
-index 7ea670e..5aa297a 100644
+index 7ea670e..835723c 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
-@@ -96,7 +96,7 @@ lane_data = %r10
+@@ -53,6 +53,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha256_mb_mgr_datastruct.S"
+
+ .extern sha256_x8_avx2
+@@ -96,7 +97,7 @@ lane_data = %r10
# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
# arg 1 : rcx : state
# arg 2 : rdx : job
@@ -15880,8 +17329,47 @@ index 7ea670e..5aa297a 100644
FRAME_BEGIN
push %rbx
push %r12
+@@ -164,7 +165,7 @@ start_loop:
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha256_x8_avx2
++ pax_direct_call sha256_x8_avx2
+
+ # state and idx are intact
+
+@@ -200,7 +201,7 @@ return:
+ pop %r12
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha256_mb_mgr_submit_avx2
+
+ return_null:
+ xor job_rax, job_rax
+diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+index aa21aea..cb35a6e 100644
+--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
++++ b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+@@ -52,6 +52,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "sha256_mb_mgr_datastruct.S"
+
+ ## code to compute oct SHA256 using SSE-256
+@@ -435,7 +436,7 @@ Lrounds_16_xx:
+ pop %r13
+ pop %r12
+
+- ret
++ pax_ret sha256_x8_avx2
+ ENDPROC(sha256_x8_avx2)
+ .data
+ .align 64
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
-index 2cedc44..6fb8582 100644
+index 2cedc44..35ed999 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -47,6 +47,7 @@
@@ -15903,19 +17391,28 @@ index 2cedc44..6fb8582 100644
pushq %rbx
pushq %rbp
pushq %r13
-@@ -471,6 +470,7 @@ done_hash:
+@@ -471,7 +470,7 @@ done_hash:
popq %rbp
popq %rbx
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha256_transform_ssse3
ENDPROC(sha256_transform_ssse3)
+ .data
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
-index 748cdf2..959bb4d 100644
+index 748cdf2..cd2180d 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
-@@ -97,7 +97,7 @@
+@@ -54,6 +54,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define DIGEST_PTR %rdi /* 1st arg */
+ #define DATA_PTR %rsi /* 2nd arg */
+@@ -97,7 +98,7 @@
.text
.align 32
@@ -15924,6 +17421,15 @@ index 748cdf2..959bb4d 100644
shl $6, NUM_BLKS /* convert to bytes */
jz .Ldone_hash
+@@ -326,7 +327,7 @@ ENTRY(sha256_ni_transform)
+
+ .Ldone_hash:
+
+- ret
++ pax_ret sha256_ni_transform
+ ENDPROC(sha256_ni_transform)
+
+ .data
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index 9e79baf..c5186c74 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
@@ -15997,7 +17503,7 @@ index 9e79baf..c5186c74 100644
static int sha256_ni_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
-index 565274d..779d34a 100644
+index 565274d..106c3dc 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -49,6 +49,7 @@
@@ -16018,16 +17524,17 @@ index 565274d..779d34a 100644
cmp $0, msglen
je nowork
-@@ -364,6 +366,7 @@ updateblock:
+@@ -364,7 +366,7 @@ updateblock:
mov frame_RSPSAVE(%rsp), %rsp
nowork:
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha512_transform_avx
ENDPROC(sha512_transform_avx)
+ ########################################################################
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
-index 1f20b35..ab1f3a8 100644
+index 1f20b35..f12df89 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -51,6 +51,7 @@
@@ -16048,19 +17555,39 @@ index 1f20b35..ab1f3a8 100644
# Allocate Stack Space
mov %rsp, %rax
sub $frame_size, %rsp
-@@ -678,6 +680,7 @@ done_hash:
+@@ -678,7 +680,7 @@ done_hash:
# Restore Stack Pointer
mov frame_RSPSAVE(%rsp), %rsp
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha512_transform_rorx
ENDPROC(sha512_transform_rorx)
+ ########################################################################
+diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+index 178f17e..88a59c6 100644
+--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
++++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+@@ -100,5 +100,6 @@ struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
+ struct job_sha512 *job);
+ struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
+ struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
++struct job_sha512 *sha512_x4_avx2(struct sha512_mb_mgr *state) __rap_hash;
+
+ #endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
-index 3ddba19..2d3abc7 100644
+index 3ddba19..392d6a1 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
-@@ -107,7 +107,7 @@ offset = \_offset
+@@ -53,6 +53,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha512_mb_mgr_datastruct.S"
+
+ .extern sha512_x4_avx2
+@@ -107,7 +108,7 @@ offset = \_offset
# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
# arg 1 : rcx : state
@@ -16069,7 +17596,25 @@ index 3ddba19..2d3abc7 100644
FRAME_BEGIN
push %rbx
-@@ -220,7 +220,7 @@ return_null:
+@@ -177,7 +178,7 @@ LABEL skip_ %I
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha512_x4_avx2
++ pax_direct_call sha512_x4_avx2
+ # state and idx are intact
+
+ len_is_0:
+@@ -212,7 +213,7 @@ len_is_0:
+ return:
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha512_mb_mgr_flush_avx2
+
+ return_null:
+ xor job_rax, job_rax
+@@ -220,7 +221,7 @@ return_null:
ENDPROC(sha512_mb_mgr_flush_avx2)
.align 16
@@ -16078,11 +17623,34 @@ index 3ddba19..2d3abc7 100644
push %rbx
mov _unused_lanes(state), unused_lanes
+@@ -273,12 +274,12 @@ ENTRY(sha512_mb_mgr_get_comp_job_avx2)
+
+ pop %rbx
+
+- ret
++ pax_ret sha512_mb_mgr_get_comp_job_avx2
+
+ .return_null:
+ xor job_rax, job_rax
+ pop %rbx
+- ret
++ pax_ret sha512_mb_mgr_get_comp_job_avx2
+ ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
+ .data
+
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
-index 815f07b..70fbc7b 100644
+index 815f07b..a1f961a 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
-@@ -98,7 +98,7 @@
+@@ -53,6 +53,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+ #include "sha512_mb_mgr_datastruct.S"
+
+ .extern sha512_x4_avx2
+@@ -98,7 +99,7 @@
# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
# arg 1 : rcx : state
# arg 2 : rdx : job
@@ -16091,8 +17659,47 @@ index 815f07b..70fbc7b 100644
FRAME_BEGIN
push %rbx
push %r12
+@@ -167,7 +168,7 @@ start_loop:
+
+ # "state" and "args" are the same address, arg1
+ # len is arg2
+- call sha512_x4_avx2
++ pax_direct_call sha512_x4_avx2
+ # state and idx are intact
+
+ len_is_0:
+@@ -203,7 +204,7 @@ return:
+ pop %r12
+ pop %rbx
+ FRAME_END
+- ret
++ pax_ret sha512_mb_mgr_submit_avx2
+
+ return_null:
+ xor job_rax, job_rax
+diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+index 31ab1ef..da5a002 100644
+--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
++++ b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+@@ -63,6 +63,7 @@
+ # clobbers ymm0-15
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "sha512_mb_mgr_datastruct.S"
+
+ arg1 = %rdi
+@@ -358,7 +359,7 @@ Lrounds_16_xx:
+ pop %r12
+
+ # outer calling routine restores XMM and other GP registers
+- ret
++ pax_ret sha512_x4_avx2
+ ENDPROC(sha512_x4_avx2)
+
+ .data
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
-index e610e29..83f1cde 100644
+index e610e29..6b3848e 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -48,6 +48,7 @@
@@ -16113,14 +17720,15 @@ index e610e29..83f1cde 100644
cmp $0, msglen
je nowork
-@@ -363,6 +365,7 @@ updateblock:
+@@ -363,7 +365,7 @@ updateblock:
mov frame_RSPSAVE(%rsp), %rsp
nowork:
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret sha512_transform_ssse3
ENDPROC(sha512_transform_ssse3)
+ ########################################################################
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 2b0e2a6..59a1f94 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
@@ -16184,7 +17792,7 @@ index 2b0e2a6..59a1f94 100644
static int sha512_avx2_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
-index dc66273..30aba4b 100644
+index dc66273..91dc734b 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -25,6 +25,7 @@
@@ -16195,20 +17803,21 @@ index dc66273..30aba4b 100644
#include "glue_helper-asm-avx.S"
.file "twofish-avx-x86_64-asm_64.S"
-@@ -285,6 +286,7 @@ __twofish_enc_blk8:
+@@ -285,7 +286,7 @@ __twofish_enc_blk8:
outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __twofish_enc_blk8;
ENDPROC(__twofish_enc_blk8)
-@@ -325,10 +327,11 @@ __twofish_dec_blk8:
+ .align 8
+@@ -325,10 +326,10 @@ __twofish_dec_blk8:
outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __twofish_dec_blk8;
ENDPROC(__twofish_dec_blk8)
-ENTRY(twofish_ecb_enc_8way)
@@ -16216,12 +17825,18 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -345,10 +348,11 @@ ENTRY(twofish_ecb_enc_8way)
+@@ -340,15 +341,15 @@ ENTRY(twofish_ecb_enc_8way)
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- call __twofish_enc_blk8;
++ pax_direct_call __twofish_enc_blk8;
+
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_ecb_enc_8way;
ENDPROC(twofish_ecb_enc_8way)
-ENTRY(twofish_ecb_dec_8way)
@@ -16229,12 +17844,18 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -365,10 +369,11 @@ ENTRY(twofish_ecb_dec_8way)
+@@ -360,15 +361,15 @@ ENTRY(twofish_ecb_dec_8way)
+
+ load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+- call __twofish_dec_blk8;
++ pax_direct_call __twofish_dec_blk8;
+
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_ecb_dec_8way;
ENDPROC(twofish_ecb_dec_8way)
-ENTRY(twofish_cbc_dec_8way)
@@ -16242,7 +17863,7 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -376,24 +381,25 @@ ENTRY(twofish_cbc_dec_8way)
+@@ -376,24 +377,24 @@ ENTRY(twofish_cbc_dec_8way)
*/
FRAME_BEGIN
@@ -16255,7 +17876,8 @@ index dc66273..30aba4b 100644
load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
- call __twofish_dec_blk8;
+- call __twofish_dec_blk8;
++ pax_direct_call __twofish_dec_blk8;
- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
@@ -16264,8 +17886,8 @@ index dc66273..30aba4b 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_cbc_dec_8way;
ENDPROC(twofish_cbc_dec_8way)
-ENTRY(twofish_ctr_8way)
@@ -16273,7 +17895,7 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -402,25 +408,26 @@ ENTRY(twofish_ctr_8way)
+@@ -402,25 +403,25 @@ ENTRY(twofish_ctr_8way)
*/
FRAME_BEGIN
@@ -16287,7 +17909,8 @@ index dc66273..30aba4b 100644
load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
RD2, RX0, RX1, RY0);
- call __twofish_enc_blk8;
+- call __twofish_enc_blk8;
++ pax_direct_call __twofish_enc_blk8;
- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
@@ -16296,8 +17919,8 @@ index dc66273..30aba4b 100644
+ popq %r14;
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_ctr_8way;
ENDPROC(twofish_ctr_8way)
-ENTRY(twofish_xts_enc_8way)
@@ -16305,12 +17928,19 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -441,10 +448,11 @@ ENTRY(twofish_xts_enc_8way)
+@@ -435,16 +436,16 @@ ENTRY(twofish_xts_enc_8way)
+ load_xts_8way(%rcx, %rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2,
+ RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);
+
+- call __twofish_enc_blk8;
++ pax_direct_call __twofish_enc_blk8;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_xts_enc_8way;
ENDPROC(twofish_xts_enc_8way)
-ENTRY(twofish_xts_dec_8way)
@@ -16318,18 +17948,33 @@ index dc66273..30aba4b 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -465,5 +473,6 @@ ENTRY(twofish_xts_dec_8way)
+@@ -459,11 +460,11 @@ ENTRY(twofish_xts_dec_8way)
+ load_xts_8way(%rcx, %rdx, %rsi, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2,
+ RX0, RX1, RY0, .Lxts_gf128mul_and_shl1_mask);
+
+- call __twofish_dec_blk8;
++ pax_direct_call __twofish_dec_blk8;
+
+ /* dst <= regs xor IVs(in dst) */
store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_xts_dec_8way;
ENDPROC(twofish_xts_dec_8way)
diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
-index 694ea45..f2c1418 100644
+index 694ea45..91b9a8d 100644
--- a/arch/x86/crypto/twofish-i586-asm_32.S
+++ b/arch/x86/crypto/twofish-i586-asm_32.S
-@@ -220,7 +220,7 @@
+@@ -22,6 +22,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ /* return address at 0 */
+
+@@ -220,7 +221,7 @@
xor %esi, d ## D;\
ror $1, d ## D;
@@ -16338,8 +17983,12 @@ index 694ea45..f2c1418 100644
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
-@@ -276,7 +276,7 @@ ENTRY(twofish_enc_blk)
- ret
+@@ -273,10 +274,10 @@ ENTRY(twofish_enc_blk)
+ pop %ebx
+ pop %ebp
+ mov $1, %eax
+- ret
++ pax_ret twofish_enc_blk
ENDPROC(twofish_enc_blk)
-ENTRY(twofish_dec_blk)
@@ -16347,8 +17996,15 @@ index 694ea45..f2c1418 100644
push %ebp /* save registers according to calling convention*/
push %ebx
push %esi
+@@ -330,5 +331,5 @@ ENTRY(twofish_dec_blk)
+ pop %ebx
+ pop %ebp
+ mov $1, %eax
+- ret
++ pax_ret twofish_dec_blk
+ ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
-index 1c3b7ce..c9912c7 100644
+index 1c3b7ce..9a65a0b 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -21,6 +21,7 @@
@@ -16359,20 +18015,21 @@ index 1c3b7ce..c9912c7 100644
.file "twofish-x86_64-asm-3way.S"
.text
-@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
+@@ -258,7 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __twofish_enc_blk_3way;
.L__enc_xor3:
-@@ -269,10 +271,11 @@ ENTRY(__twofish_enc_blk_3way)
+ outunpack_enc3(xor);
+@@ -269,10 +270,10 @@ ENTRY(__twofish_enc_blk_3way)
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret __twofish_enc_blk_3way;
ENDPROC(__twofish_enc_blk_3way)
-ENTRY(twofish_dec_blk_3way)
@@ -16380,15 +18037,15 @@ index 1c3b7ce..c9912c7 100644
/* input:
* %rdi: ctx, CTX
* %rsi: dst
-@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
+@@ -308,5 +309,5 @@ ENTRY(twofish_dec_blk_3way)
popq %r13;
popq %r14;
popq %r15;
-+ pax_force_retaddr
- ret;
+- ret;
++ pax_ret twofish_dec_blk_3way;
ENDPROC(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
-index a350c99..080c5ab 100644
+index a350c99..b59af9f 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -22,6 +22,7 @@
@@ -16408,12 +18065,12 @@ index a350c99..080c5ab 100644
pushq R1
/* %rdi contains the ctx address */
-@@ -265,10 +266,11 @@ ENTRY(twofish_enc_blk)
+@@ -265,10 +266,10 @@ ENTRY(twofish_enc_blk)
popq R1
movl $1,%eax
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret twofish_enc_blk
ENDPROC(twofish_enc_blk)
-ENTRY(twofish_dec_blk)
@@ -16421,18 +18078,18 @@ index a350c99..080c5ab 100644
pushq R1
/* %rdi contains the ctx address */
-@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
+@@ -317,5 +318,5 @@ ENTRY(twofish_dec_blk)
popq R1
movl $1,%eax
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret twofish_dec_blk
ENDPROC(twofish_dec_blk)
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
-index b7a3904b..3e4d0d6 100644
+index b7a3904b..0d8bc60 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
-@@ -46,24 +46,25 @@
+@@ -46,24 +46,27 @@
#define TWOFISH_PARALLEL_BLOCKS 8
/* 8-way parallel cipher functions */
@@ -16442,6 +18099,8 @@ index b7a3904b..3e4d0d6 100644
-asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
+asmlinkage void twofish_ecb_dec_8way(void *ctx, u8 *dst,
const u8 *src);
++void __twofish_enc_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __twofish_dec_blk8(void *ctx, u8 *dst, const u8 *src) __rap_hash;
-asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
+asmlinkage void twofish_cbc_dec_8way(void *ctx, u8 *dst,
@@ -16529,17 +18188,16 @@ index 2ebb5e9..a0b0aa9 100644
};
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
-index 9976fce..bf5f3e0 100644
+index 9976fce..4c336fd0 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
-@@ -15,3 +15,5 @@ obj-y += vsyscall/
+@@ -14,4 +14,3 @@ obj-y += vdso/
+ obj-y += vsyscall/
obj-$(CONFIG_IA32_EMULATION) += entry_64_compat.o syscall_32.o
-
-+CFLAGS_REMOVE_syscall_32.o = $(RAP_PLUGIN_ABS_CFLAGS)
-+CFLAGS_REMOVE_syscall_64.o = $(RAP_PLUGIN_ABS_CFLAGS)
+-
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
-index 9a9e588..b900d1c 100644
+index 9a9e588..4f8115a 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -95,23 +95,26 @@ For 32-bit we have the following conventions - kernel is built with
@@ -16684,27 +18342,34 @@ index 9a9e588..b900d1c 100644
.endm
.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+@@ -212,7 +226,7 @@ For 32-bit we have the following conventions - kernel is built with
+ #ifdef HAVE_JUMP_LABEL
+ STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+ #endif
+- call enter_from_user_mode
++ pax_direct_call enter_from_user_mode
+ .Lafter_call_\@:
+ #endif
+ .endm
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
-index 1433f6b..dac4cbe 100644
+index bdd9cc5..486d4bf 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -33,9 +33,7 @@
-
- static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
- {
-- unsigned long top_of_stack =
-- (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
-- return (struct thread_info *)(top_of_stack - THREAD_SIZE);
-+ return current_thread_info();
- }
-
- #ifdef CONFIG_CONTEXT_TRACKING
-@@ -49,6 +47,12 @@ __visible inline void enter_from_user_mode(void)
+@@ -42,6 +42,21 @@ __visible inline void enter_from_user_mode(void)
static inline void enter_from_user_mode(void) {}
#endif
++void pax_enter_kernel(void) __rap_hash;
++void pax_enter_kernel_user(void) __rap_hash;
++void pax_exit_kernel(void) __rap_hash;
++void pax_exit_kernel_user(void) __rap_hash;
++
++void paranoid_entry(void) __rap_hash;
++void paranoid_entry_nmi(void) __rap_hash;
++void error_entry(void) __rap_hash;
++
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+asmlinkage void pax_erase_kstack(void);
++asmlinkage void pax_erase_kstack(void) __rap_hash;
+#else
+static void pax_erase_kstack(void) {}
+#endif
@@ -16712,7 +18377,7 @@ index 1433f6b..dac4cbe 100644
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
-@@ -63,6 +67,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
+@@ -56,6 +71,10 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
}
}
@@ -16723,7 +18388,7 @@ index 1433f6b..dac4cbe 100644
/*
* Returns the syscall nr to run (which should match regs->orig_ax) or -1
* to skip the syscall.
-@@ -81,12 +89,19 @@ static long syscall_trace_enter(struct pt_regs *regs)
+@@ -74,12 +93,19 @@ static long syscall_trace_enter(struct pt_regs *regs)
work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
@@ -16744,11 +18409,13 @@ index 1433f6b..dac4cbe 100644
if (emulated)
return -1L;
-@@ -121,8 +136,10 @@ static long syscall_trace_enter(struct pt_regs *regs)
+@@ -113,9 +139,11 @@ static long syscall_trace_enter(struct pt_regs *regs)
+ sd.args[5] = regs->bp;
}
- ret = __secure_computing(&sd);
+- ret = __secure_computing(&sd);
- if (ret == -1)
++ ret = secure_computing(&sd);
+ if (ret == -1) {
+ pax_erase_kstack();
return ret;
@@ -16756,7 +18423,7 @@ index 1433f6b..dac4cbe 100644
}
#endif
-@@ -131,6 +148,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
+@@ -124,6 +152,7 @@ static long syscall_trace_enter(struct pt_regs *regs)
do_audit_syscall_entry(regs, arch);
@@ -16764,7 +18431,7 @@ index 1433f6b..dac4cbe 100644
return ret ?: regs->orig_ax;
}
-@@ -237,7 +255,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
+@@ -229,7 +258,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
step = unlikely(
(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
== _TIF_SINGLESTEP);
@@ -16773,7 +18440,7 @@ index 1433f6b..dac4cbe 100644
tracehook_report_syscall_exit(regs, step);
}
-@@ -256,6 +274,11 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
+@@ -248,6 +277,11 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
local_irq_enable();
@@ -16785,89 +18452,7 @@ index 1433f6b..dac4cbe 100644
/*
* First do one-time work. If these work items are enabled, we
* want to run them exactly once per syscall exit with IRQs on.
-@@ -285,9 +308,29 @@ __visible void do_syscall_64(struct pt_regs *regs)
- * regs->orig_ax, which changes the behavior of some syscalls.
- */
- if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
-+#ifdef CONFIG_PAX_RAP
-+ asm volatile("movq %[param1],%%rdi\n\t"
-+ "movq %[param2],%%rsi\n\t"
-+ "movq %[param3],%%rdx\n\t"
-+ "movq %[param4],%%rcx\n\t"
-+ "movq %[param5],%%r8\n\t"
-+ "movq %[param6],%%r9\n\t"
-+ "call *%P[syscall]\n\t"
-+ "mov %%rax,%[result]\n\t"
-+ : [result] "=m" (regs->ax)
-+ : [syscall] "m" (sys_call_table[nr & __SYSCALL_MASK]),
-+ [param1] "m" (regs->di),
-+ [param2] "m" (regs->si),
-+ [param3] "m" (regs->dx),
-+ [param4] "m" (regs->r10),
-+ [param5] "m" (regs->r8),
-+ [param6] "m" (regs->r9)
-+ : "ax", "di", "si", "dx", "cx", "r8", "r9", "r10", "r11", "memory");
-+#else
- regs->ax = sys_call_table[nr & __SYSCALL_MASK](
- regs->di, regs->si, regs->dx,
- regs->r10, regs->r8, regs->r9);
-+#endif
- }
-
- syscall_return_slowpath(regs);
-@@ -327,10 +370,51 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
- * the high bits are zero. Make sure we zero-extend all
- * of the args.
- */
-+#ifdef CONFIG_PAX_RAP
-+#ifdef CONFIG_X86_64
-+ asm volatile("movl %[param1],%%edi\n\t"
-+ "movl %[param2],%%esi\n\t"
-+ "movl %[param3],%%edx\n\t"
-+ "movl %[param4],%%ecx\n\t"
-+ "movl %[param5],%%r8d\n\t"
-+ "movl %[param6],%%r9d\n\t"
-+ "call *%P[syscall]\n\t"
-+ "mov %%rax,%[result]\n\t"
-+ : [result] "=m" (regs->ax)
-+ : [syscall] "m" (ia32_sys_call_table[nr]),
-+ [param1] "m" (regs->bx),
-+ [param2] "m" (regs->cx),
-+ [param3] "m" (regs->dx),
-+ [param4] "m" (regs->si),
-+ [param5] "m" (regs->di),
-+ [param6] "m" (regs->bp)
-+ : "ax", "di", "si", "dx", "cx", "r8", "r9", "r10", "r11", "memory");
-+#else
-+ asm volatile("pushl %[param6]\n\t"
-+ "pushl %[param5]\n\t"
-+ "pushl %[param4]\n\t"
-+ "pushl %[param3]\n\t"
-+ "pushl %[param2]\n\t"
-+ "pushl %[param1]\n\t"
-+ "call *%P[syscall]\n\t"
-+ "addl $6*8,%%esp\n\t"
-+ "mov %%eax,%[result]\n\t"
-+ : [result] "=m" (regs->ax)
-+ : [syscall] "m" (ia32_sys_call_table[nr]),
-+ [param1] "m" (regs->bx),
-+ [param2] "m" (regs->cx),
-+ [param3] "m" (regs->dx),
-+ [param4] "m" (regs->si),
-+ [param5] "m" (regs->di),
-+ [param6] "m" (regs->bp)
-+ : "ax", "dx", "cx", "memory");
-+#endif
-+#else
- regs->ax = ia32_sys_call_table[nr](
- (unsigned int)regs->bx, (unsigned int)regs->cx,
- (unsigned int)regs->dx, (unsigned int)regs->si,
- (unsigned int)regs->di, (unsigned int)regs->bp);
-+#endif
- }
-
- syscall_return_slowpath(regs);
-@@ -354,6 +438,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
+@@ -346,6 +380,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
vdso_image_32.sym_int80_landing_pad;
@@ -16875,7 +18460,7 @@ index 1433f6b..dac4cbe 100644
/*
* SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
-@@ -373,11 +458,9 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
+@@ -365,11 +400,9 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
* Micro-optimization: the pointer we're following is explicitly
* 32 bits, so it can't be out of range.
*/
@@ -16890,10 +18475,18 @@ index 1433f6b..dac4cbe 100644
) {
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
-index b84a349..7ed55b6 100644
+index edba860..d684e0f 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -147,13 +147,157 @@
+@@ -45,6 +45,7 @@
+ #include <asm/asm.h>
+ #include <asm/smap.h>
+ #include <asm/export.h>
++#include <asm/current.h>
+
+ .section .entry.text, "ax"
+
+@@ -148,13 +149,157 @@
movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
@@ -16914,13 +18507,13 @@ index b84a349..7ed55b6 100644
-.macro SAVE_ALL pt_regs_ax=%eax
+.macro pax_enter_kernel
+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_enter_kernel
++ pax_direct_call pax_enter_kernel
+#endif
+.endm
+
+.macro pax_exit_kernel
+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_exit_kernel
++ pax_direct_call pax_exit_kernel
+#endif
+.endm
+
@@ -16929,7 +18522,7 @@ index b84a349..7ed55b6 100644
+#ifdef CONFIG_PARAVIRT
+ pushl %eax
+ pushl %ecx
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0), read_cr0
+ mov %eax, %esi
+#else
+ mov %cr0, %esi
@@ -16944,7 +18537,7 @@ index b84a349..7ed55b6 100644
+2:
+#ifdef CONFIG_PARAVIRT
+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++ pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0), write_cr0
+#else
+ mov %esi, %cr0
+#endif
@@ -16953,7 +18546,7 @@ index b84a349..7ed55b6 100644
+ popl %ecx
+ popl %eax
+#endif
-+ ret
++ pax_ret pax_enter_kernel
+ENDPROC(pax_enter_kernel)
+
+ENTRY(pax_exit_kernel)
@@ -16965,7 +18558,7 @@ index b84a349..7ed55b6 100644
+ cmp $__KERNEXEC_KERNEL_CS, %esi
+ jnz 2f
+#ifdef CONFIG_PARAVIRT
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0), read_cr0
+ mov %eax, %esi
+#else
+ mov %cr0, %esi
@@ -16975,7 +18568,7 @@ index b84a349..7ed55b6 100644
+1:
+#ifdef CONFIG_PARAVIRT
+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++ pax_indirect_call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0), write_cr0
+#else
+ mov %esi, %cr0
+#endif
@@ -16984,13 +18577,13 @@ index b84a349..7ed55b6 100644
+ popl %ecx
+ popl %eax
+#endif
-+ ret
++ pax_ret pax_exit_kernel
+ENDPROC(pax_exit_kernel)
+#endif
+
+ .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
++ pax_direct_call pax_erase_kstack
+#endif
+ .endm
+
@@ -17004,8 +18597,8 @@ index b84a349..7ed55b6 100644
+ pushl %eax
+ pushl %ebp
+
-+ GET_THREAD_INFO(%ebp)
-+ mov TI_lowest_stack(%ebp), %edi
++ GET_CURRENT(%ebp)
++ mov TASK_lowest_stack(%ebp), %edi
+ mov $-0xBEEF, %eax
+ std
+
@@ -17036,15 +18629,15 @@ index b84a349..7ed55b6 100644
+ shr $2, %ecx
+ rep stosl
+
-+ mov TI_task_thread_sp0(%ebp), %edi
++ mov TASK_thread_sp0(%ebp), %edi
+ sub $128, %edi
-+ mov %edi, TI_lowest_stack(%ebp)
++ mov %edi, TASK_lowest_stack(%ebp)
+
+ popl %ebp
+ popl %eax
+ popl %ecx
+ popl %edi
-+ ret
++ pax_ret pax_erase_kstack
+ENDPROC(pax_erase_kstack)
+#endif
+
@@ -17052,7 +18645,7 @@ index b84a349..7ed55b6 100644
cld
PUSH_GS
pushl %fs
-@@ -166,7 +310,7 @@
+@@ -167,7 +312,7 @@
pushl %edx
pushl %ecx
pushl %ebx
@@ -17061,7 +18654,7 @@ index b84a349..7ed55b6 100644
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
-@@ -174,6 +318,15 @@
+@@ -175,6 +320,15 @@
SET_KERNEL_GS %edx
.endm
@@ -17077,16 +18670,49 @@ index b84a349..7ed55b6 100644
.macro RESTORE_INT_REGS
popl %ebx
popl %ecx
-@@ -213,7 +366,7 @@ ENTRY(ret_from_fork)
+@@ -235,7 +389,7 @@ ENTRY(__switch_to_asm)
+ popl %ebp
+
+ jmp __switch_to
+-END(__switch_to_asm)
++ENDPROC(__switch_to_asm)
+
+ /*
+ * A newly forked process directly context switches into this address.
+@@ -246,7 +400,7 @@ END(__switch_to_asm)
+ */
+ ENTRY(ret_from_fork)
+ pushl %eax
+- call schedule_tail
++ pax_direct_call schedule_tail
+ popl %eax
+
+ testl %ebx, %ebx
+@@ -255,12 +409,12 @@ ENTRY(ret_from_fork)
+ 2:
+ /* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
- call syscall_return_slowpath
+- call syscall_return_slowpath
++ pax_direct_call syscall_return_slowpath
jmp restore_all
+
+ /* kernel thread */
+ 1: movl %edi, %eax
+- call *%ebx
++ pax_indirect_call "%ebx", kthreadd
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+@@ -268,7 +422,7 @@ ENTRY(ret_from_fork)
+ */
+ movl $0, PT_EAX(%esp)
+ jmp 2b
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
- ENTRY(ret_from_kernel_thread)
- pushl %eax
-@@ -257,15 +410,23 @@ ret_from_intr:
+ /*
+ * Return to user mode is not as complex as all this looks,
+@@ -294,15 +448,23 @@ ret_from_intr:
andl $SEGMENT_RPL_MASK, %eax
#endif
cmpl $USER_RPL, %eax
@@ -17104,24 +18730,28 @@ index b84a349..7ed55b6 100644
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl %esp, %eax
- call prepare_exit_to_usermode
+- call prepare_exit_to_usermode
- jmp restore_all
-END(ret_from_exception)
++ pax_direct_call prepare_exit_to_usermode
+ jmp .Lsyscall_32_done
+ENDPROC(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
-@@ -277,7 +438,7 @@ need_resched:
+@@ -312,9 +474,9 @@ need_resched:
+ jnz restore_all
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
- call preempt_schedule_irq
+- call preempt_schedule_irq
++ pax_direct_call preempt_schedule_irq
jmp need_resched
-END(resume_kernel)
+ENDPROC(resume_kernel)
#endif
GLOBAL(__begin_SYSENTER_singlestep_region)
-@@ -344,6 +505,10 @@ sysenter_past_esp:
+@@ -381,6 +543,10 @@ sysenter_past_esp:
pushl %eax /* pt_regs->orig_ax */
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
@@ -17132,13 +18762,19 @@ index b84a349..7ed55b6 100644
/*
* SYSENTER doesn't filter flags, so we need to clear NT, AC
* and TF ourselves. To save a few cycles, we can check whether
-@@ -379,11 +544,20 @@ sysenter_past_esp:
+@@ -411,16 +577,25 @@ sysenter_past_esp:
+ TRACE_IRQS_OFF
+
+ movl %esp, %eax
+- call do_fast_syscall_32
++ pax_direct_call do_fast_syscall_32
+ /* XEN PV guests always use IRET path */
ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
"jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+#ifdef CONFIG_PAX_RANDKSTACK
+ movl %esp, %eax
-+ call pax_randomize_kstack
++ pax_direct_call pax_randomize_kstack
+#endif
+
+ pax_erase_kstack
@@ -17153,7 +18789,7 @@ index b84a349..7ed55b6 100644
PTGS_TO_GS
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
-@@ -409,10 +583,16 @@ sysenter_past_esp:
+@@ -446,10 +621,16 @@ sysenter_past_esp:
sysexit
.pushsection .fixup, "ax"
@@ -17172,7 +18808,7 @@ index b84a349..7ed55b6 100644
PTGS_TO_GS_EX
.Lsysenter_fix_flags:
-@@ -455,6 +635,10 @@ ENTRY(entry_INT80_32)
+@@ -492,6 +673,10 @@ ENTRY(entry_INT80_32)
pushl %eax /* pt_regs->orig_ax */
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
@@ -17183,13 +18819,17 @@ index b84a349..7ed55b6 100644
/*
* User mode is traced as though IRQs are on, and the interrupt gate
* turned them off.
-@@ -465,6 +649,13 @@ ENTRY(entry_INT80_32)
- call do_int80_syscall_32
+@@ -499,9 +684,16 @@ ENTRY(entry_INT80_32)
+ TRACE_IRQS_OFF
+
+ movl %esp, %eax
+- call do_int80_syscall_32
++ pax_direct_call do_int80_syscall_32
.Lsyscall_32_done:
+#ifdef CONFIG_PAX_RANDKSTACK
+ movl %esp, %eax
-+ call pax_randomize_kstack
++ pax_direct_call pax_randomize_kstack
+#endif
+
+ pax_erase_kstack
@@ -17197,7 +18837,7 @@ index b84a349..7ed55b6 100644
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
-@@ -508,14 +699,34 @@ ldt_ss:
+@@ -545,14 +737,34 @@ ldt_ss:
* compensating for the offset by changing to the ESPFIX segment with
* a base address that matches for the difference.
*/
@@ -17235,7 +18875,7 @@ index b84a349..7ed55b6 100644
pushl $__ESPFIX_SS
pushl %eax /* new kernel esp */
/*
-@@ -539,8 +750,15 @@ ENDPROC(entry_INT80_32)
+@@ -576,8 +788,15 @@ ENDPROC(entry_INT80_32)
*/
#ifdef CONFIG_X86_ESPFIX32
/* fixup the stack */
@@ -17253,7 +18893,7 @@ index b84a349..7ed55b6 100644
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS
-@@ -576,7 +794,7 @@ ENTRY(irq_entries_start)
+@@ -613,7 +832,7 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
@@ -17262,7 +18902,25 @@ index b84a349..7ed55b6 100644
/*
* the CPU automatically disables interrupts when executing an IRQ vector,
-@@ -623,7 +841,7 @@ ENTRY(coprocessor_error)
+@@ -626,7 +845,7 @@ common_interrupt:
+ SAVE_ALL
+ TRACE_IRQS_OFF
+ movl %esp, %eax
+- call do_IRQ
++ pax_direct_call do_IRQ
+ jmp ret_from_intr
+ ENDPROC(common_interrupt)
+
+@@ -637,7 +856,7 @@ ENTRY(name) \
+ SAVE_ALL; \
+ TRACE_IRQS_OFF \
+ movl %esp, %eax; \
+- call fn; \
++ pax_direct_call fn; \
+ jmp ret_from_intr; \
+ ENDPROC(name)
+
+@@ -660,7 +879,7 @@ ENTRY(coprocessor_error)
pushl $0
pushl $do_coprocessor_error
jmp error_code
@@ -17271,7 +18929,7 @@ index b84a349..7ed55b6 100644
ENTRY(simd_coprocessor_error)
ASM_CLAC
-@@ -637,20 +855,20 @@ ENTRY(simd_coprocessor_error)
+@@ -674,20 +893,20 @@ ENTRY(simd_coprocessor_error)
pushl $do_simd_coprocessor_error
#endif
jmp error_code
@@ -17295,7 +18953,7 @@ index b84a349..7ed55b6 100644
#endif
ENTRY(overflow)
-@@ -658,59 +876,59 @@ ENTRY(overflow)
+@@ -695,59 +914,59 @@ ENTRY(overflow)
pushl $0
pushl $do_overflow
jmp error_code
@@ -17364,7 +19022,7 @@ index b84a349..7ed55b6 100644
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
-@@ -718,7 +936,7 @@ ENTRY(machine_check)
+@@ -755,7 +974,7 @@ ENTRY(machine_check)
pushl $0
pushl machine_check_vector
jmp error_code
@@ -17373,7 +19031,7 @@ index b84a349..7ed55b6 100644
#endif
ENTRY(spurious_interrupt_bug)
-@@ -726,7 +944,16 @@ ENTRY(spurious_interrupt_bug)
+@@ -763,7 +982,32 @@ ENTRY(spurious_interrupt_bug)
pushl $0
pushl $do_spurious_interrupt_bug
jmp error_code
@@ -17388,46 +19046,127 @@ index b84a349..7ed55b6 100644
+ jmp error_code
+ENDPROC(refcount_error)
+#endif
++
++#ifdef CONFIG_PAX_RAP
++ENTRY(rap_call_error)
++ ASM_CLAC
++ pushl $0
++ pushl $do_rap_call_error
++ jmp error_code
++ENDPROC(rap_call_error)
++
++ENTRY(rap_ret_error)
++ ASM_CLAC
++ pushl $0
++ pushl $do_rap_ret_error
++ jmp error_code
++ENDPROC(rap_ret_error)
++#endif
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
-@@ -825,7 +1052,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+@@ -788,9 +1032,9 @@ ENTRY(xen_hypervisor_callback)
+
+ ENTRY(xen_do_upcall)
+ 1: mov %esp, %eax
+- call xen_evtchn_do_upcall
++ pax_direct_call xen_evtchn_do_upcall
+ #ifndef CONFIG_PREEMPT
+- call xen_maybe_preempt_hcall
++ pax_direct_call xen_maybe_preempt_hcall
+ #endif
+ jmp ret_from_intr
+ ENDPROC(xen_hypervisor_callback)
+@@ -861,8 +1105,8 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+ #ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
- ret
+- ret
-END(mcount)
++ pax_ret mcount
+ENDPROC(mcount)
ENTRY(ftrace_caller)
pushl %eax
-@@ -855,7 +1082,7 @@ ftrace_graph_call:
+@@ -876,7 +1120,7 @@ ENTRY(ftrace_caller)
+
+ .globl ftrace_call
+ ftrace_call:
+- call ftrace_stub
++ pax_direct_call ftrace_stub
+
+ addl $4, %esp /* skip NULL pointer */
+ popl %edx
+@@ -891,8 +1135,8 @@ ftrace_graph_call:
+
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
- ret
+- ret
-END(ftrace_caller)
++ pax_ret ftrace_caller
+ENDPROC(ftrace_caller)
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
-@@ -953,7 +1180,7 @@ trace:
+@@ -931,7 +1175,7 @@ ENTRY(ftrace_regs_caller)
+ pushl %esp /* Save pt_regs as 4th parameter */
+
+ GLOBAL(ftrace_regs_call)
+- call ftrace_stub
++ pax_direct_call ftrace_stub
+
+ addl $4, %esp /* Skip pt_regs */
+ movl 14*4(%esp), %eax /* Move flags back into cs */
+@@ -973,7 +1217,7 @@ ENTRY(mcount)
+ #endif
+ .globl ftrace_stub
+ ftrace_stub:
+- ret
++ pax_ret ftrace_stub
+
+ /* taken from glibc */
+ trace:
+@@ -984,13 +1228,13 @@ trace:
+ movl 0x4(%ebp), %edx
+ subl $MCOUNT_INSN_SIZE, %eax
+
+- call *ftrace_trace_function
++ pax_indirect_call "ftrace_trace_function", ftrace_stub
+
+ popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
-END(mcount)
+ENDPROC(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
+ EXPORT_SYMBOL(mcount)
#endif /* CONFIG_FUNCTION_TRACER */
-
-@@ -971,7 +1198,7 @@ ENTRY(ftrace_graph_caller)
+@@ -1004,19 +1248,19 @@ ENTRY(ftrace_graph_caller)
+ lea 0x4(%ebp), %edx
+ movl (%ebp), %ecx
+ subl $MCOUNT_INSN_SIZE, %eax
+- call prepare_ftrace_return
++ pax_direct_call prepare_ftrace_return
+ popl %edx
popl %ecx
popl %eax
- ret
+- ret
-END(ftrace_graph_caller)
++ pax_ret ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
-@@ -990,7 +1217,7 @@ ENTRY(trace_page_fault)
+ pushl %eax
+ pushl %edx
+ movl %ebp, %eax
+- call ftrace_return_to_handler
++ pax_direct_call ftrace_return_to_handler
+ movl %eax, %ecx
+ popl %edx
+ popl %eax
+@@ -1028,7 +1272,7 @@ ENTRY(trace_page_fault)
ASM_CLAC
pushl $trace_do_page_fault
jmp error_code
@@ -17436,7 +19175,7 @@ index b84a349..7ed55b6 100644
#endif
ENTRY(page_fault)
-@@ -1019,16 +1246,19 @@ error_code:
+@@ -1057,16 +1301,19 @@ error_code:
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -17449,7 +19188,8 @@ index b84a349..7ed55b6 100644
+
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
- call *%edi
+- call *%edi
++ pax_indirect_call "%edi", do_page_fault
jmp ret_from_exception
-END(page_fault)
+ENDPROC(page_fault)
@@ -17459,7 +19199,7 @@ index b84a349..7ed55b6 100644
/*
* #DB can happen at the first instruction of
* entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this
-@@ -1045,7 +1275,13 @@ ENTRY(debug)
+@@ -1083,13 +1330,19 @@ ENTRY(debug)
movl %esp, %eax # pt_regs pointer
/* Are we currently on the SYSENTER stack? */
@@ -17474,8 +19214,19 @@ index b84a349..7ed55b6 100644
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx
jb .Ldebug_from_sysenter_stack
-@@ -1062,7 +1298,7 @@ ENTRY(debug)
- call do_debug
+
+ TRACE_IRQS_OFF
+- call do_debug
++ pax_direct_call do_debug
+ jmp ret_from_exception
+
+ .Ldebug_from_sysenter_stack:
+@@ -1097,10 +1350,10 @@ ENTRY(debug)
+ movl %esp, %ebp
+ movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
+ TRACE_IRQS_OFF
+- call do_debug
++ pax_direct_call do_debug
movl %ebp, %esp
jmp ret_from_exception
-END(debug)
@@ -17483,7 +19234,7 @@ index b84a349..7ed55b6 100644
/*
* NMI is doubly nasty. It can happen on the first instruction of
-@@ -1087,13 +1323,22 @@ ENTRY(nmi)
+@@ -1125,13 +1378,22 @@ ENTRY(nmi)
movl %esp, %eax # pt_regs pointer
/* Are we currently on the SYSENTER stack? */
@@ -17500,16 +19251,20 @@ index b84a349..7ed55b6 100644
jb .Lnmi_from_sysenter_stack
/* Not on SYSENTER stack. */
- call do_nmi
+- call do_nmi
++ pax_direct_call do_nmi
+
+ pax_exit_kernel
+
jmp restore_all_notrace
.Lnmi_from_sysenter_stack:
-@@ -1105,6 +1350,9 @@ ENTRY(nmi)
+@@ -1141,8 +1403,11 @@ ENTRY(nmi)
+ */
+ movl %esp, %ebp
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
- call do_nmi
+- call do_nmi
++ pax_direct_call do_nmi
movl %ebp, %esp
+
+ pax_exit_kernel
@@ -17517,10 +19272,12 @@ index b84a349..7ed55b6 100644
jmp restore_all_notrace
#ifdef CONFIG_X86_ESPFIX32
-@@ -1124,11 +1372,14 @@ nmi_espfix_stack:
+@@ -1161,12 +1426,15 @@ nmi_espfix_stack:
+ SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
- call do_nmi
+- call do_nmi
++ pax_direct_call do_nmi
+
+ pax_exit_kernel
+
@@ -17533,9 +19290,12 @@ index b84a349..7ed55b6 100644
ENTRY(int3)
ASM_CLAC
-@@ -1139,19 +1390,19 @@ ENTRY(int3)
+@@ -1175,21 +1443,21 @@ ENTRY(int3)
+ TRACE_IRQS_OFF
+ xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
- call do_int3
+- call do_int3
++ pax_direct_call do_int3
jmp ret_from_exception
-END(int3)
+ENDPROC(int3)
@@ -17556,29 +19316,30 @@ index b84a349..7ed55b6 100644
#endif
ENTRY(rewind_stack_do_exit)
-@@ -1161,6 +1412,6 @@ ENTRY(rewind_stack_do_exit)
+@@ -1199,6 +1467,6 @@ ENTRY(rewind_stack_do_exit)
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
- call do_exit
-+ call do_group_exit
++ pax_direct_call do_group_exit
1: jmp 1b
-END(rewind_stack_do_exit)
+ENDPROC(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
-index 02fff3e..c6685ec 100644
+index ef766a3..d3f0e59 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -36,6 +36,8 @@
- #include <asm/smap.h>
+@@ -37,6 +37,9 @@
#include <asm/pgtable_types.h>
+ #include <asm/export.h>
#include <linux/err.h>
+#include <asm/pgtable.h>
+#include <asm/alternative-asm.h>
++#include <asm/current.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -53,6 +55,395 @@ ENTRY(native_usergs_sysret64)
+@@ -54,6 +57,392 @@ ENTRY(native_usergs_sysret64)
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
@@ -17599,13 +19360,13 @@ index 02fff3e..c6685ec 100644
+ .macro pax_enter_kernel
+ pax_set_fptr_mask
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ call pax_enter_kernel
++ pax_direct_call pax_enter_kernel
+#endif
+ .endm
+
+ .macro pax_exit_kernel
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ call pax_exit_kernel
++ pax_direct_call pax_exit_kernel
+#endif
+ .endm
+
@@ -17651,8 +19412,7 @@ index 02fff3e..c6685ec 100644
+#endif
+
+ popq %rdi
-+ pax_force_retaddr
-+ retq
++ pax_ret pax_enter_kernel
+
+#ifdef CONFIG_PAX_KERNEXEC
+2: ljmpq __KERNEL_CS,1b
@@ -17701,8 +19461,7 @@ index 02fff3e..c6685ec 100644
+#endif
+
+ popq %rdi
-+ pax_force_retaddr
-+ retq
++ pax_ret pax_exit_kernel
+
+#ifdef CONFIG_PAX_KERNEXEC
+2: GET_CR0_INTO_RDI
@@ -17720,18 +19479,18 @@ index 02fff3e..c6685ec 100644
+ .macro pax_enter_kernel_user
+ pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
++ pax_direct_call pax_enter_kernel_user
+#endif
+ .endm
+
+ .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
++ pax_direct_call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax
+ pushq %r11
-+ call pax_randomize_kstack
++ pax_direct_call pax_randomize_kstack
+ popq %r11
+ popq %rax
+#endif
@@ -17739,6 +19498,7 @@ index 02fff3e..c6685ec 100644
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ENTRY(pax_enter_kernel_user)
++GLOBAL(patch_pax_enter_kernel_user)
+ pushq %rdi
+ pushq %rbx
+
@@ -17768,7 +19528,7 @@ index 02fff3e..c6685ec 100644
+ mov i*8(%rbx),%rsi
+ mov $0,%sil
+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ pax_indirect_call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched), pv_mmu_ops.set_pgd_batched
+ i = i + 1
+ .endr
+ popq %rdi
@@ -17796,12 +19556,12 @@ index 02fff3e..c6685ec 100644
+
+ popq %rbx
+ popq %rdi
-+ pax_force_retaddr
-+ retq
++ pax_ret pax_enter_kernel_user
+4: ud2
+ENDPROC(pax_enter_kernel_user)
+
+ENTRY(pax_exit_kernel_user)
++GLOBAL(patch_pax_exit_kernel_user)
+ pushq %rdi
+ pushq %rbx
+
@@ -17837,7 +19597,7 @@ index 02fff3e..c6685ec 100644
+ mov i*8(%rbx),%rsi
+ mov $0x67,%sil
+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ pax_indirect_call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched), pv_mmu_ops.set_pgd_batched
+ i = i + 1
+ .endr
+#else
@@ -17856,8 +19616,7 @@ index 02fff3e..c6685ec 100644
+
+ popq %rbx
+ popq %rdi
-+ pax_force_retaddr
-+ retq
++ pax_ret pax_exit_kernel_user
+3: ud2
+ENDPROC(pax_exit_kernel_user)
+#endif
@@ -17915,7 +19674,7 @@ index 02fff3e..c6685ec 100644
+
+ .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
++ pax_direct_call pax_erase_kstack
+#endif
+ .endm
+
@@ -17926,8 +19685,8 @@ index 02fff3e..c6685ec 100644
+ pushq %rax
+ pushq %r11
+
-+ GET_THREAD_INFO(%r11)
-+ mov TI_lowest_stack(%r11), %rdi
++ GET_CURRENT(%r11)
++ mov TASK_lowest_stack(%r11), %rdi
+ mov $-0xBEEF, %rax
+ std
+
@@ -17958,23 +19717,38 @@ index 02fff3e..c6685ec 100644
+ shr $3, %ecx
+ rep stosq
+
-+ mov TI_task_thread_sp0(%r11), %rdi
++ mov TASK_thread_sp0(%r11), %rdi
+ sub $256, %rdi
-+ mov %rdi, TI_lowest_stack(%r11)
++ mov %rdi, TASK_lowest_stack(%r11)
+
+ popq %r11
+ popq %rax
+ popq %rcx
+ popq %rdi
-+ pax_force_retaddr
-+ ret
++ pax_ret pax_erase_kstack
+ENDPROC(pax_erase_kstack)
+#endif
+
.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
bt $9, EFLAGS(%rsp) /* interrupts off? */
-@@ -88,7 +479,7 @@ ENDPROC(native_usergs_sysret64)
+@@ -77,19 +466,19 @@ ENDPROC(native_usergs_sysret64)
+ #if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
+
+ .macro TRACE_IRQS_OFF_DEBUG
+- call debug_stack_set_zero
++ pax_direct_call debug_stack_set_zero
+ TRACE_IRQS_OFF
+- call debug_stack_reset
++ pax_direct_call debug_stack_reset
+ .endm
+
+ .macro TRACE_IRQS_ON_DEBUG
+- call debug_stack_set_zero
++ pax_direct_call debug_stack_set_zero
+ TRACE_IRQS_ON
+- call debug_stack_reset
++ pax_direct_call debug_stack_reset
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
@@ -17983,7 +19757,7 @@ index 02fff3e..c6685ec 100644
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
-@@ -175,11 +566,22 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+@@ -176,6 +565,16 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
@@ -18000,20 +19774,17 @@ index 02fff3e..c6685ec 100644
/*
* If we need to do entry work or if we guess we'll need to do
* exit work, go straight to the slow path.
+@@ -206,7 +605,7 @@ entry_SYSCALL_64_fastpath:
+ * It might end up jumping to the slow path. If it jumps, RAX
+ * and all argument registers are clobbered.
*/
-- testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ GET_THREAD_INFO(%rcx)
-+ testl $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TI_flags(%rcx)
- jnz entry_SYSCALL64_slow_path
+- call *sys_call_table(, %rax, 8)
++ pax_indirect_call "sys_call_table(, %rax, 8)", sys_ni_syscall
+ .Lentry_SYSCALL_64_after_fastpath_call:
- entry_SYSCALL_64_fastpath:
-@@ -217,9 +619,13 @@ entry_SYSCALL_64_fastpath:
- */
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ GET_THREAD_INFO(%rcx)
-+ testl $_TIF_ALLWORK_MASK, TI_flags(%rcx)
+ movq %rax, RAX(%rsp)
+@@ -223,6 +622,9 @@ entry_SYSCALL_64_fastpath:
+ testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
jnz 1f
+ pax_exit_kernel_user
@@ -18022,8 +19793,20 @@ index 02fff3e..c6685ec 100644
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON /* user mode is traced as IRQs on */
movq RIP(%rsp), %rcx
-@@ -248,6 +654,9 @@ entry_SYSCALL64_slow_path:
- call do_syscall_64 /* returns with IRQs disabled */
+@@ -241,16 +643,19 @@ entry_SYSCALL_64_fastpath:
+ ENABLE_INTERRUPTS(CLBR_NONE)
+ SAVE_EXTRA_REGS
+ movq %rsp, %rdi
+- call syscall_return_slowpath /* returns with IRQs disabled */
++ pax_direct_call syscall_return_slowpath /* returns with IRQs disabled */
+ jmp return_from_SYSCALL_64
+
+ entry_SYSCALL64_slow_path:
+ /* IRQs are off. */
+ SAVE_EXTRA_REGS
+ movq %rsp, %rdi
+- call do_syscall_64 /* returns with IRQs disabled */
++ pax_direct_call do_syscall_64 /* returns with IRQs disabled */
return_from_SYSCALL_64:
+ pax_exit_kernel_user
@@ -18032,7 +19815,7 @@ index 02fff3e..c6685ec 100644
RESTORE_EXTRA_REGS
TRACE_IRQS_IRETQ /* we're about to change IF */
-@@ -272,13 +681,12 @@ return_from_SYSCALL_64:
+@@ -275,13 +680,12 @@ return_from_SYSCALL_64:
.error "virtual address width changed -- SYSRET checks need update"
.endif
@@ -18052,7 +19835,7 @@ index 02fff3e..c6685ec 100644
cmpq $__USER_CS, CS(%rsp) /* CS must match SYSRET */
jne opportunistic_sysret_failed
-@@ -326,7 +734,7 @@ syscall_return_via_sysret:
+@@ -329,7 +733,7 @@ syscall_return_via_sysret:
opportunistic_sysret_failed:
SWAPGS
jmp restore_c_regs_and_iret
@@ -18061,37 +19844,103 @@ index 02fff3e..c6685ec 100644
ENTRY(stub_ptregs_64)
/*
-@@ -353,13 +761,13 @@ ENTRY(stub_ptregs_64)
+@@ -355,13 +759,17 @@ ENTRY(stub_ptregs_64)
+
1:
- /* Called from C */
- jmp *%rax /* called from C */
+ jmp *%rax /* Called from C */
-END(stub_ptregs_64)
+ENDPROC(stub_ptregs_64)
.macro ptregs_stub func
- ENTRY(ptregs_\func)
+-ENTRY(ptregs_\func)
++RAP_ENTRY(ptregs_\func)
++#ifdef CONFIG_PAX_RAP
++ leaq rap_\func(%rip), %rax
++#else
leaq \func(%rip), %rax
++#endif
jmp stub_ptregs_64
-END(ptregs_\func)
+ENDPROC(ptregs_\func)
.endm
/* Instantiate ptregs_stub for each ptregs-using syscall */
-@@ -401,10 +809,12 @@ ENTRY(ret_from_fork)
- 1:
+@@ -381,7 +789,9 @@ ENTRY(__switch_to_asm)
+ */
+ pushq %rbp
+ pushq %rbx
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ pushq %r12
++#endif
+ pushq %r13
+ pushq %r14
+ pushq %r15
+@@ -399,38 +809,49 @@ ENTRY(__switch_to_asm)
+ popq %r15
+ popq %r14
+ popq %r13
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ popq %r12
++#endif
+ popq %rbx
+ popq %rbp
+
+ jmp __switch_to
+-END(__switch_to_asm)
++ENDPROC(__switch_to_asm)
+
+ /*
+ * A newly forked process directly context switches into this address.
+ *
+ * rax: prev task we switched from
+ * rbx: kernel thread func (NULL for user thread)
+- * r12: kernel thread arg
++ * r13: kernel thread arg
+ */
++#ifdef CONFIG_PAX_RAP
++ __ALIGN
++ pax_retloc __switch_to
++ .globl ret_from_fork
++ret_from_fork:
++#else
+ ENTRY(ret_from_fork)
++#endif
+ movq %rax, %rdi
+- call schedule_tail /* rdi: 'prev' task parameter */
++ pax_direct_call schedule_tail /* rdi: 'prev' task parameter */
+
+ testq %rbx, %rbx /* from kernel_thread? */
+ jnz 1f /* kernel threads are uncommon */
+
+ 2:
movq %rsp, %rdi
- call syscall_return_slowpath /* returns with IRQs disabled */
+- call syscall_return_slowpath /* returns with IRQs disabled */
++ pax_direct_call syscall_return_slowpath /* returns with IRQs disabled */
+ pax_exit_kernel_user
+ pax_erase_kstack
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWAPGS
jmp restore_regs_and_iret
+
+ 1:
+ /* kernel thread */
+- movq %r12, %rdi
+- call *%rbx
++ movq %r13, %rdi
++ pax_indirect_call %rbx, kthreadd
+ /*
+ * A kernel thread is allowed to return here after successfully
+ * calling do_execve(). Exit to userspace to complete the execve()
+@@ -438,7 +859,7 @@ ENTRY(ret_from_fork)
+ */
+ movq $0, RAX(%rsp)
+ jmp 2b
-END(ret_from_fork)
+ENDPROC(ret_from_fork)
/*
* Build the entry stubs with some assembler magic.
-@@ -419,7 +829,7 @@ ENTRY(irq_entries_start)
+@@ -453,7 +874,7 @@ ENTRY(irq_entries_start)
jmp common_interrupt
.align 8
.endr
@@ -18100,7 +19949,7 @@ index 02fff3e..c6685ec 100644
/*
* Interrupt entry/exit.
-@@ -445,6 +855,12 @@ END(irq_entries_start)
+@@ -479,6 +900,12 @@ END(irq_entries_start)
*/
SWAPGS
@@ -18113,7 +19962,7 @@ index 02fff3e..c6685ec 100644
/*
* We need to tell lockdep that IRQs are off. We can't do this until
* we fix gsbase, and we should do it before enter_from_user_mode
-@@ -457,7 +873,9 @@ END(irq_entries_start)
+@@ -491,7 +918,9 @@ END(irq_entries_start)
CALL_enter_from_user_mode
@@ -18124,7 +19973,7 @@ index 02fff3e..c6685ec 100644
/*
* Save previous stack pointer, optionally switch to interrupt stack.
* irq_count is used to check if a CPU is already on an interrupt stack
-@@ -469,6 +887,7 @@ END(irq_entries_start)
+@@ -503,10 +932,11 @@ END(irq_entries_start)
incl PER_CPU_VAR(irq_count)
cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
pushq %rdi
@@ -18132,16 +19981,28 @@ index 02fff3e..c6685ec 100644
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
-@@ -500,6 +919,8 @@ ret_from_intr:
+- call \func /* rdi points to pt_regs */
++ pax_direct_call \func /* rdi points to pt_regs */
+ .endm
+
+ /*
+@@ -533,7 +963,9 @@ ret_from_intr:
+ /* Interrupt came from user space */
GLOBAL(retint_user)
mov %rsp,%rdi
- call prepare_exit_to_usermode
+- call prepare_exit_to_usermode
++ pax_direct_call prepare_exit_to_usermode
+ pax_exit_kernel_user
+# pax_erase_kstack
TRACE_IRQS_IRETQ
SWAPGS
jmp restore_regs_and_iret
-@@ -517,6 +938,21 @@ retint_kernel:
+@@ -547,10 +979,25 @@ retint_kernel:
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
+ jnz 1f
+- call preempt_schedule_irq
++ pax_direct_call preempt_schedule_irq
jmp 0b
1:
#endif
@@ -18163,29 +20024,29 @@ index 02fff3e..c6685ec 100644
/*
* The iretq could re-enable interrupts:
*/
-@@ -560,15 +996,15 @@ native_irq_return_ldt:
+@@ -614,15 +1061,15 @@ native_irq_return_ldt:
SWAPGS
movq PER_CPU_VAR(espfix_waddr), %rdi
- movq %rax, (0*8)(%rdi) /* RAX */
-- movq (2*8)(%rsp), %rax /* RIP */
-+ movq (2*8 + RIP-RIP)(%rsp), %rax /* RIP */
+ movq %rax, (0*8)(%rdi) /* user RAX */
+- movq (1*8)(%rsp), %rax /* user RIP */
++ movq (8 + RIP-RIP)(%rsp), %rax /* user RIP */
movq %rax, (1*8)(%rdi)
-- movq (3*8)(%rsp), %rax /* CS */
-+ movq (2*8 + CS-RIP)(%rsp), %rax /* CS */
+- movq (2*8)(%rsp), %rax /* user CS */
++ movq (8 + CS-RIP)(%rsp), %rax /* user CS */
movq %rax, (2*8)(%rdi)
-- movq (4*8)(%rsp), %rax /* RFLAGS */
-+ movq (2*8 + EFLAGS-RIP)(%rsp), %rax /* RFLAGS */
+- movq (3*8)(%rsp), %rax /* user RFLAGS */
++ movq (8 + EFLAGS-RIP)(%rsp), %rax /* user RFLAGS */
movq %rax, (3*8)(%rdi)
-- movq (6*8)(%rsp), %rax /* SS */
-+ movq (2*8 + SS-RIP)(%rsp), %rax /* SS */
+- movq (5*8)(%rsp), %rax /* user SS */
++ movq (8 + SS-RIP)(%rsp), %rax /* user SS */
movq %rax, (5*8)(%rdi)
-- movq (5*8)(%rsp), %rax /* RSP */
-+ movq (2*8 + RSP-RIP)(%rsp), %rax /* RSP */
+- movq (4*8)(%rsp), %rax /* user RSP */
++ movq (8 + RSP-RIP)(%rsp), %rax /* user RSP */
movq %rax, (4*8)(%rdi)
- andl $0xffff0000, %eax
- popq %rdi
-@@ -578,7 +1014,7 @@ native_irq_return_ldt:
- popq %rax
+ /* Now RAX == RSP. */
+
+@@ -654,7 +1101,7 @@ native_irq_return_ldt:
+ */
jmp native_irq_return_iret
#endif
-END(common_interrupt)
@@ -18193,7 +20054,7 @@ index 02fff3e..c6685ec 100644
/*
* APIC interrupts.
-@@ -590,7 +1026,7 @@ ENTRY(\sym)
+@@ -666,7 +1113,7 @@ ENTRY(\sym)
.Lcommon_\sym:
interrupt \do_sym
jmp ret_from_intr
@@ -18202,16 +20063,41 @@ index 02fff3e..c6685ec 100644
.endm
#ifdef CONFIG_TRACING
-@@ -666,7 +1102,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+@@ -742,15 +1189,19 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
/*
* Exception entry points.
*/
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
- .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
++.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 rap_hash=0
ENTRY(\sym)
-@@ -713,6 +1149,12 @@ ENTRY(\sym)
+ /* Sanity check */
+ .if \shift_ist != -1 && \paranoid == 0
+ .error "using shift_ist requires paranoid=1"
+ .endif
+
++ .if \paranoid != 0 && \rap_hash==tailcall
++ .error "tail called idt entry cannot be paranoid"
++ .endif
++
+ ASM_CLAC
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+
+@@ -765,9 +1216,9 @@ ENTRY(\sym)
+ testb $3, CS(%rsp) /* If coming from userspace, switch stacks */
+ jnz 1f
+ .endif
+- call paranoid_entry
++ pax_direct_call paranoid_entry
+ .else
+- call error_entry
++ pax_direct_call error_entry
+ .endif
+ /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
+
+@@ -789,10 +1240,23 @@ ENTRY(\sym)
.endif
.if \shift_ist != -1
@@ -18224,7 +20110,42 @@ index 02fff3e..c6685ec 100644
subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
-@@ -756,7 +1198,7 @@ ENTRY(\sym)
+- call \do_sym
++ .ifc \rap_hash,tailcall
++ jmp \do_sym
++ .exitm
++ .elseif \rap_hash == 0
++ pax_direct_call \do_sym
++ .else
++ pax_indirect_call \do_sym, \rap_hash
++ .endif
+
+ .if \shift_ist != -1
+ addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+@@ -812,11 +1276,11 @@ ENTRY(\sym)
+ * run in real process context if user_mode(regs).
+ */
+ 1:
+- call error_entry
++ pax_direct_call error_entry
+
+
+ movq %rsp, %rdi /* pt_regs pointer */
+- call sync_regs
++ pax_direct_call sync_regs
+ movq %rax, %rsp /* switch stack */
+
+ movq %rsp, %rdi /* pt_regs pointer */
+@@ -828,11 +1292,15 @@ ENTRY(\sym)
+ xorl %esi, %esi /* no error code */
+ .endif
+
+- call \do_sym
++ .if \rap_hash == 0
++ pax_direct_call \do_sym
++ .else
++ pax_indirect_call \do_sym, \rap_hash
++ .endif
jmp error_exit /* %ebx: no swapgs flag */
.endif
@@ -18233,40 +20154,67 @@ index 02fff3e..c6685ec 100644
.endm
#ifdef CONFIG_TRACING
-@@ -784,6 +1226,9 @@ idtentry coprocessor_error do_coprocessor_error has_error_code=0
+@@ -860,6 +1328,14 @@ idtentry coprocessor_error do_coprocessor_error has_error_code=0
idtentry alignment_check do_alignment_check has_error_code=1
idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
+#ifdef CONFIG_PAX_REFCOUNT
+idtentry refcount_error do_refcount_error has_error_code=0
+#endif
++
++#ifdef CONFIG_PAX_RAP
++idtentry rap_call_error do_rap_call_error has_error_code=0
++idtentry rap_ret_error do_rap_ret_error has_error_code=0
++#endif
/*
* Reload gs selector with exception handling
-@@ -798,8 +1243,9 @@ ENTRY(native_load_gs_index)
+@@ -874,8 +1350,8 @@ ENTRY(native_load_gs_index)
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
popfq
-+ pax_force_retaddr
- ret
+- ret
-END(native_load_gs_index)
++ pax_ret native_load_gs_index
+ENDPROC(native_load_gs_index)
+ EXPORT_SYMBOL(native_load_gs_index)
_ASM_EXTABLE(.Lgs_change, bad_gs)
- .section .fixup, "ax"
-@@ -827,8 +1273,9 @@ ENTRY(do_softirq_own_stack)
- call __do_softirq
+@@ -901,14 +1377,14 @@ ENTRY(do_softirq_own_stack)
+ incl PER_CPU_VAR(irq_count)
+ cmove PER_CPU_VAR(irq_stack_ptr), %rsp
+ push %rbp /* frame pointer backlink */
+- call __do_softirq
++ pax_direct_call __do_softirq
leaveq
decl PER_CPU_VAR(irq_count)
-+ pax_force_retaddr
- ret
+- ret
-END(do_softirq_own_stack)
++ pax_ret do_softirq_own_stack
+ENDPROC(do_softirq_own_stack)
#ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-@@ -864,7 +1311,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
- call xen_maybe_preempt_hcall
+-idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
++idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 rap_hash=tailcall
+
+ /*
+ * A note on the "critical region" in our callback handler.
+@@ -929,19 +1405,18 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+ * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
+ * see the correct pointer to the pt_regs
+ */
+- movq %rdi, %rsp /* we don't return, adjust the stack frame */
+ 11: incl PER_CPU_VAR(irq_count)
+ movq %rsp, %rbp
+ cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
+ pushq %rbp /* frame pointer backlink */
+- call xen_evtchn_do_upcall
++ pax_direct_call xen_evtchn_do_upcall
+ popq %rsp
+ decl PER_CPU_VAR(irq_count)
+ #ifndef CONFIG_PREEMPT
+- call xen_maybe_preempt_hcall
++ pax_direct_call xen_maybe_preempt_hcall
#endif
jmp error_exit
-END(xen_do_hypervisor_callback)
@@ -18274,7 +20222,7 @@ index 02fff3e..c6685ec 100644
/*
* Hypervisor uses this for application faults while it executes.
-@@ -909,7 +1356,7 @@ ENTRY(xen_failsafe_callback)
+@@ -986,7 +1461,7 @@ ENTRY(xen_failsafe_callback)
SAVE_C_REGS
SAVE_EXTRA_REGS
jmp error_exit
@@ -18283,7 +20231,7 @@ index 02fff3e..c6685ec 100644
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -921,7 +1368,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+@@ -998,7 +1473,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */
@@ -18292,7 +20240,16 @@ index 02fff3e..c6685ec 100644
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment do_stack_segment has_error_code=1
-@@ -958,8 +1405,34 @@ ENTRY(paranoid_entry)
+@@ -1016,7 +1491,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
+ #endif
+
+ #ifdef CONFIG_X86_MCE
+-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
++idtentry machine_check has_error_code=0 paranoid=1 do_sym="machine_check_vector(%rip)" rap_hash=do_machine_check
+ #endif
+
+ /*
+@@ -1035,8 +1510,32 @@ ENTRY(paranoid_entry)
js 1f /* negative -> in kernel */
SWAPGS
xorl %ebx, %ebx
@@ -18307,8 +20264,7 @@ index 02fff3e..c6685ec 100644
+#endif
+1: pax_enter_kernel
+2:
-+ pax_force_retaddr
-+ ret
++ pax_ret paranoid_entry
+ENDPROC(paranoid_entry)
+
+ENTRY(paranoid_entry_nmi)
@@ -18323,13 +20279,12 @@ index 02fff3e..c6685ec 100644
+ SWAPGS
+ xorl %ebx, %ebx
+1: pax_enter_kernel_nmi
-+ pax_force_retaddr
-+ ret
++ pax_ret paranoid_entry_nmi
+ENDPROC(paranoid_entry_nmi)
/*
* "Paranoid" exit path from exception stack. This is invoked
-@@ -976,19 +1449,26 @@ END(paranoid_entry)
+@@ -1053,19 +1552,26 @@ END(paranoid_entry)
ENTRY(paranoid_exit)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF_DEBUG
@@ -18358,7 +20313,7 @@ index 02fff3e..c6685ec 100644
/*
* Save all registers in pt_regs, and switch gs if needed.
-@@ -1008,6 +1488,12 @@ ENTRY(error_entry)
+@@ -1085,6 +1591,12 @@ ENTRY(error_entry)
*/
SWAPGS
@@ -18371,20 +20326,21 @@ index 02fff3e..c6685ec 100644
.Lerror_entry_from_usermode_after_swapgs:
/*
* We need to tell lockdep that IRQs are off. We can't do this until
-@@ -1016,10 +1502,12 @@ ENTRY(error_entry)
+@@ -1093,11 +1605,11 @@ ENTRY(error_entry)
*/
TRACE_IRQS_OFF
CALL_enter_from_user_mode
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret error_entry
.Lerror_entry_done:
TRACE_IRQS_OFF
-+ pax_force_retaddr
- ret
+- ret
++ pax_ret error_entry
/*
-@@ -1037,7 +1525,7 @@ ENTRY(error_entry)
+ * There are two places in the kernel that can potentially fault with
+@@ -1114,7 +1626,7 @@ ENTRY(error_entry)
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
cmpq $.Lgs_change, RIP+8(%rsp)
@@ -18393,7 +20349,7 @@ index 02fff3e..c6685ec 100644
/*
* hack: .Lgs_change can fail with user gsbase. If this happens, fix up
-@@ -1045,7 +1533,8 @@ ENTRY(error_entry)
+@@ -1122,7 +1634,8 @@ ENTRY(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
@@ -18403,7 +20359,7 @@ index 02fff3e..c6685ec 100644
.Lbstep_iret:
/* Fix truncated RIP */
-@@ -1059,6 +1548,12 @@ ENTRY(error_entry)
+@@ -1136,17 +1649,23 @@ ENTRY(error_entry)
*/
SWAPGS
@@ -18416,7 +20372,11 @@ index 02fff3e..c6685ec 100644
/*
* Pretend that the exception came from user mode: set up pt_regs
* as if we faulted immediately after IRET and clear EBX so that
-@@ -1069,11 +1564,11 @@ ENTRY(error_entry)
+ * error_exit knows that we will be returning to user mode.
+ */
+ mov %rsp, %rdi
+- call fixup_bad_iret
++ pax_direct_call fixup_bad_iret
mov %rax, %rsp
decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
@@ -18425,12 +20385,7 @@ index 02fff3e..c6685ec 100644
/*
-- * On entry, EBS is a "return to kernel mode" flag:
-+ * On entry, EBX is a "return to kernel mode" flag:
- * 1: already in kernel mode, don't need SWAPGS
- * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
-@@ -1081,10 +1576,10 @@ ENTRY(error_exit)
+@@ -1158,10 +1677,10 @@ ENTRY(error_exit)
movl %ebx, %eax
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -18443,7 +20398,7 @@ index 02fff3e..c6685ec 100644
/* Runs on exception stack */
ENTRY(nmi)
-@@ -1138,6 +1633,8 @@ ENTRY(nmi)
+@@ -1215,6 +1734,8 @@ ENTRY(nmi)
* other IST entries.
*/
@@ -18452,7 +20407,7 @@ index 02fff3e..c6685ec 100644
/* Use %rdx as our temp variable throughout */
pushq %rdx
-@@ -1181,6 +1678,12 @@ ENTRY(nmi)
+@@ -1258,6 +1779,12 @@ ENTRY(nmi)
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
@@ -18465,15 +20420,18 @@ index 02fff3e..c6685ec 100644
/*
* At this point we no longer need to worry about stack damage
* due to nesting -- we're on the normal thread stack and we're
-@@ -1191,12 +1694,19 @@ ENTRY(nmi)
- movq $-1, %rsi
- call do_nmi
+@@ -1266,7 +1793,9 @@ ENTRY(nmi)
-+ pax_exit_kernel_nmi
+ movq %rsp, %rdi
+ movq $-1, %rsi
+- call do_nmi
++ pax_direct_call do_nmi
+
++ pax_exit_kernel_nmi
+
/*
* Return back to user mode. We must *not* do the normal exit
- * work, because we don't want to enable interrupts. Fortunately,
+@@ -1274,6 +1803,11 @@ ENTRY(nmi)
* do_nmi doesn't modify pt_regs.
*/
SWAPGS
@@ -18485,7 +20443,7 @@ index 02fff3e..c6685ec 100644
jmp restore_c_regs_and_iret
.Lnmi_from_kernel:
-@@ -1318,6 +1828,7 @@ nested_nmi_out:
+@@ -1395,6 +1929,7 @@ nested_nmi_out:
popq %rdx
/* We are returning to kernel mode, so this cannot result in a fault. */
@@ -18493,7 +20451,7 @@ index 02fff3e..c6685ec 100644
INTERRUPT_RETURN
first_nmi:
-@@ -1346,7 +1857,7 @@ first_nmi:
+@@ -1423,7 +1958,7 @@ first_nmi:
pushq %rsp /* RSP (minus 8 because of the previous push) */
addq $8, (%rsp) /* Fix up RSP */
pushfq /* RFLAGS */
@@ -18502,7 +20460,7 @@ index 02fff3e..c6685ec 100644
pushq $1f /* RIP */
INTERRUPT_RETURN /* continues at repeat_nmi below */
1:
-@@ -1391,20 +1902,22 @@ end_repeat_nmi:
+@@ -1468,20 +2003,22 @@ end_repeat_nmi:
ALLOC_PT_GPREGS_ON_STACK
/*
@@ -18514,12 +20472,13 @@ index 02fff3e..c6685ec 100644
* exceptions might do.
*/
- call paranoid_entry
-+ call paranoid_entry_nmi
++ pax_direct_call paranoid_entry_nmi
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
- call do_nmi
+- call do_nmi
++ pax_direct_call do_nmi
- testl %ebx, %ebx /* swapgs needed? */
+ pax_exit_kernel_nmi
@@ -18528,7 +20487,7 @@ index 02fff3e..c6685ec 100644
jnz nmi_restore
nmi_swapgs:
SWAPGS_UNSAFE_STACK
-@@ -1415,6 +1928,8 @@ nmi_restore:
+@@ -1492,6 +2029,8 @@ nmi_restore:
/* Point RSP at the "iret" frame. */
REMOVE_PT_GPREGS_FROM_STACK 6*8
@@ -18537,7 +20496,7 @@ index 02fff3e..c6685ec 100644
/*
* Clear "NMI executing". Set DF first so that we can easily
* distinguish the remaining code between here and IRET from
-@@ -1432,12 +1947,12 @@ nmi_restore:
+@@ -1509,12 +2048,12 @@ nmi_restore:
* mode, so this cannot result in a fault.
*/
INTERRUPT_RETURN
@@ -18552,17 +20511,17 @@ index 02fff3e..c6685ec 100644
ENTRY(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
-@@ -1446,6 +1961,6 @@ ENTRY(rewind_stack_do_exit)
+@@ -1523,6 +2062,6 @@ ENTRY(rewind_stack_do_exit)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
- call do_exit
-+ call do_group_exit
++ pax_direct_call do_group_exit
1: jmp 1b
-END(rewind_stack_do_exit)
+ENDPROC(rewind_stack_do_exit)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
-index e1721da..83f2c49 100644
+index e1721da..28b685f 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -13,11 +13,39 @@
@@ -18579,18 +20538,18 @@ index e1721da..83f2c49 100644
+ .macro pax_enter_kernel_user
+ pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
++ pax_direct_call pax_enter_kernel_user
+#endif
+ .endm
+
+ .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
++ pax_direct_call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
+ pushq %rax
+ pushq %r11
-+ call pax_randomize_kstack
++ pax_direct_call pax_randomize_kstack
+ popq %r11
+ popq %rax
+#endif
@@ -18598,7 +20557,7 @@ index e1721da..83f2c49 100644
+
+ .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
++ pax_direct_call pax_erase_kstack
+#endif
+ .endm
+
@@ -18648,6 +20607,15 @@ index e1721da..83f2c49 100644
/*
* SYSENTER doesn't filter flags, so we need to clear NT and AC
* ourselves. To save a few cycles, we can check whether
+@@ -121,7 +160,7 @@ ENTRY(entry_SYSENTER_compat)
+ TRACE_IRQS_OFF
+
+ movq %rsp, %rdi
+- call do_fast_syscall_32
++ pax_direct_call do_fast_syscall_32
+ /* XEN PV guests always use IRET path */
+ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
+ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
@@ -204,16 +243,27 @@ ENTRY(entry_SYSCALL_compat)
pushq %rdx /* pt_regs->dx */
pushq %rbp /* pt_regs->cx (stashed in bp) */
@@ -18684,7 +20652,15 @@ index e1721da..83f2c49 100644
/*
* User mode is traced as though IRQs are on, and SYSENTER
-@@ -229,11 +279,18 @@ ENTRY(entry_SYSCALL_compat)
+@@ -222,18 +272,25 @@ ENTRY(entry_SYSCALL_compat)
+ TRACE_IRQS_OFF
+
+ movq %rsp, %rdi
+- call do_fast_syscall_32
++ pax_direct_call do_fast_syscall_32
+ /* XEN PV guests always use IRET path */
+ ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
+ "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
/* Opportunistic SYSRET */
sysret32_from_system_call:
@@ -18741,7 +20717,12 @@ index e1721da..83f2c49 100644
/*
* User mode is traced as though IRQs are on, and the interrupt
* gate turned them off.
-@@ -337,10 +401,12 @@ ENTRY(entry_INT80_compat)
+@@ -333,17 +397,23 @@ ENTRY(entry_INT80_compat)
+ TRACE_IRQS_OFF
+
+ movq %rsp, %rdi
+- call do_int80_syscall_32
++ pax_direct_call do_int80_syscall_32
.Lsyscall_32_done:
/* Go back to user mode. */
@@ -18754,32 +20735,178 @@ index e1721da..83f2c49 100644
+ENDPROC(entry_INT80_compat)
ALIGN
++#ifdef CONFIG_PAX_RAP
++RAP_ENTRY(rap_stub32_clone)
++#else
GLOBAL(stub32_clone)
++#endif
+ /*
+ * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
+ * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
+@@ -352,4 +422,8 @@ GLOBAL(stub32_clone)
+ * so we need to swap arguments here before calling it:
+ */
+ xchg %r8, %rcx
++#ifdef CONFIG_PAX_RAP
++ jmp rap_sys_clone
++#else
+ jmp sys_clone
++#endif
+diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
+index 8f895ee..5cc22ed 100644
+--- a/arch/x86/entry/syscall_32.c
++++ b/arch/x86/entry/syscall_32.c
+@@ -6,11 +6,19 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/syscall.h>
+
++#ifdef CONFIG_PAX_RAP
++#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long rap_##sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
++#else
+ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
++#endif
+ #include <asm/syscalls_32.h>
+ #undef __SYSCALL_I386
+
++#ifdef CONFIG_PAX_RAP
++#define __SYSCALL_I386(nr, sym, qual) [nr] = rap_##sym,
++#else
+ #define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
++#endif
+
+ extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+
+diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
+index 9dbc5ab..b2d64fb 100644
+--- a/arch/x86/entry/syscall_64.c
++++ b/arch/x86/entry/syscall_64.c
+@@ -6,7 +6,11 @@
+ #include <asm/asm-offsets.h>
+ #include <asm/syscall.h>
+
++#ifdef CONFIG_PAX_RAP
++#define __SYSCALL_64_QUAL_(sym) rap_##sym
++#else
+ #define __SYSCALL_64_QUAL_(sym) sym
++#endif
+ #define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
+
+ #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
+index fee6bc7..02a69fb 100644
+--- a/arch/x86/entry/thunk_32.S
++++ b/arch/x86/entry/thunk_32.S
+@@ -5,7 +5,7 @@
+ * Subject to the GNU public license, v.2. No warranty of any kind.
+ */
+ #include <linux/linkage.h>
+- #include <asm/asm.h>
++ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+
+ /* put return address in eax (arg1) */
+@@ -21,11 +21,11 @@
+ movl 3*4(%esp), %eax
+ .endif
+
+- call \func
++ pax_direct_call \func
+ popl %edx
+ popl %ecx
+ popl %eax
+- ret
++ pax_ret \name
+ _ASM_NOKPROBE(\name)
+ .endm
+
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
-index 627ecbc..6490d11 100644
+index be36bf4..a22f673 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
-@@ -8,6 +8,7 @@
- #include <linux/linkage.h>
+@@ -9,6 +9,7 @@
#include "calling.h"
#include <asm/asm.h>
+ #include <asm/export.h>
+#include <asm/alternative-asm.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
-@@ -65,6 +66,7 @@
- popq %rsi
- popq %rdi
- popq %rbp
-+ pax_force_retaddr
- ret
- _ASM_NOKPROBE(.L_restore)
+@@ -33,8 +34,19 @@
+ movq 8(%rbp), %rdi
+ .endif
+
+- call \func
+- jmp .L_restore
++ pax_direct_call \func
++
++ popq %r11
++ popq %r10
++ popq %r9
++ popq %r8
++ popq %rax
++ popq %rcx
++ popq %rdx
++ popq %rsi
++ popq %rdi
++ popq %rbp
++ pax_ret \name
+ _ASM_NOKPROBE(\name)
+ .endm
+
+@@ -53,21 +65,3 @@
+ EXPORT_SYMBOL(___preempt_schedule)
+ EXPORT_SYMBOL(___preempt_schedule_notrace)
#endif
+-
+-#if defined(CONFIG_TRACE_IRQFLAGS) \
+- || defined(CONFIG_DEBUG_LOCK_ALLOC) \
+- || defined(CONFIG_PREEMPT)
+-.L_restore:
+- popq %r11
+- popq %r10
+- popq %r9
+- popq %r8
+- popq %rax
+- popq %rcx
+- popq %rdx
+- popq %rsi
+- popq %rdi
+- popq %rbp
+- ret
+- _ASM_NOKPROBE(.L_restore)
+-#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
-index d540966..443f0d7 100644
+index d540966..3ea2a6a 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
-@@ -170,7 +170,7 @@ quiet_cmd_vdso = VDSO $@
+@@ -17,6 +17,9 @@ VDSO32-$(CONFIG_IA32_EMULATION) := y
+
+ # files to link into the vdso
+ vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
++GCC_PLUGINS_vdso-note.o := n
++GCC_PLUGINS_vclock_gettime.o := n
++GCC_PLUGINS_vgetcpu.o := n
+
+ # files to link into kernel
+ obj-y += vma.o
+@@ -75,7 +78,7 @@ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+ -fno-omit-frame-pointer -foptimize-sibling-calls \
+ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+-$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
++$(vobjs): KBUILD_CFLAGS := $(KBUILD_CFLAGS) $(CFL)
+
+ #
+ # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+@@ -145,7 +148,6 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+-KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
+ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+ KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+@@ -170,7 +172,7 @@ quiet_cmd_vdso = VDSO $@
-Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
@@ -18789,7 +20916,7 @@ index d540966..443f0d7 100644
GCOV_PROFILE := n
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
-index 94d54d0..390dce1 100644
+index 02223cb..84f10fc 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -300,5 +300,5 @@ notrace time_t __vdso_time(time_t *t)
@@ -18822,7 +20949,7 @@ index 3dab75f..2c439d0 100644
GET_LE(&symtab_hdr->sh_entsize) * i;
const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
-index f840766..222abb1 100644
+index 23c881c..e4808fc 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -21,10 +21,7 @@
@@ -18837,7 +20964,7 @@ index f840766..222abb1 100644
void __init init_vdso_image(const struct vdso_image *image)
{
-@@ -90,7 +87,7 @@ static int vdso_fault(const struct vm_special_mapping *sm,
+@@ -42,7 +39,7 @@ static int vdso_fault(const struct vm_special_mapping *sm,
{
const struct vdso_image *image = vma->vm_mm->context.vdso_image;
@@ -18846,7 +20973,7 @@ index f840766..222abb1 100644
return VM_FAULT_SIGBUS;
vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
-@@ -128,7 +125,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
+@@ -80,7 +77,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return -EFAULT;
vdso_fix_landing(image, new_vma);
@@ -18855,19 +20982,7 @@ index f840766..222abb1 100644
return 0;
}
-@@ -193,6 +190,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
- .fault = vvar_fault,
- };
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ calculate_addr = false;
-+#endif
-+
- if (calculate_addr) {
- addr = vdso_addr(current->mm->start_stack,
- image->size - image->sym_vvar_start);
-@@ -204,15 +206,15 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+@@ -154,15 +151,15 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
return -EINTR;
addr = get_unmapped_area(NULL, addr,
@@ -18886,48 +21001,56 @@ index f840766..222abb1 100644
/*
* MAYWRITE to allow gdb to COW and set breakpoints
-@@ -236,14 +238,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
- VM_PFNMAP,
- &vvar_mapping);
-
-- if (IS_ERR(vma)) {
-+ if (IS_ERR(vma))
- ret = PTR_ERR(vma);
-- goto up_fail;
-- }
+@@ -193,8 +190,8 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
up_fail:
- if (ret)
+ if (ret) {
- current->mm->context.vdso = NULL;
+- current->mm->context.vdso_image = NULL;
+ mm->context.vdso = 0;
++ mm->context.vdso_image = NULL;
+ }
up_write(&mm->mmap_sem);
- return ret;
-@@ -262,9 +262,6 @@ static int load_vdso32(void)
+@@ -248,7 +245,14 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
+
+ static int map_vdso_randomized(const struct vdso_image *image)
+ {
+- unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
++ unsigned long addr;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = 0;
++ else
++#endif
++ addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
+
+ return map_vdso(image, addr);
+ }
+@@ -292,8 +296,6 @@ static int load_vdso32(void)
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- if (!vdso64_enabled)
- return 0;
--
- return map_vdso(&vdso_image_64, true);
- }
-@@ -273,12 +270,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ return map_vdso_randomized(&vdso_image_64);
+ }
+@@ -303,11 +305,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
- if (test_thread_flag(TIF_X32)) {
- if (!vdso64_enabled)
- return 0;
--
+ if (test_thread_flag(TIF_X32))
- return map_vdso(&vdso_image_x32, true);
+ return map_vdso_randomized(&vdso_image_x32);
- }
#endif
#ifdef CONFIG_IA32_EMULATION
return load_vdso32();
-@@ -295,15 +288,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -324,15 +323,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
#endif
#ifdef CONFIG_X86_64
@@ -18969,7 +21092,7 @@ index 636c4b3..666991b 100644
else if (!strcmp("none", str))
vsyscall_mode = NONE;
else
-@@ -271,8 +267,7 @@ do_ret:
+@@ -271,8 +267,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
return true;
sigsegv:
@@ -19051,10 +21174,10 @@ index b28200d..e93e14d 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
-index 7b0f1d9..09c8710 100644
+index 7fe88bb..afd1630 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
-@@ -1549,7 +1549,7 @@ static void __init pmu_check_apic(void)
+@@ -1570,7 +1570,7 @@ static void __init pmu_check_apic(void)
}
@@ -19063,7 +21186,7 @@ index 7b0f1d9..09c8710 100644
.name = "format",
.attrs = NULL,
};
-@@ -1680,7 +1680,7 @@ static struct attribute *events_attr[] = {
+@@ -1701,7 +1701,7 @@ static struct attribute *events_attr[] = {
NULL,
};
@@ -19072,7 +21195,7 @@ index 7b0f1d9..09c8710 100644
.name = "events",
.attrs = events_attr,
};
-@@ -2317,7 +2317,7 @@ static unsigned long get_segment_base(unsigned int segment)
+@@ -2325,7 +2325,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
@@ -19081,7 +21204,7 @@ index 7b0f1d9..09c8710 100644
}
return get_desc_base(desc);
-@@ -2417,7 +2417,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
+@@ -2425,7 +2425,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
break;
perf_callchain_store(entry, frame.return_address);
@@ -19091,10 +21214,10 @@ index 7b0f1d9..09c8710 100644
pagefault_enable();
}
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
-index 3ef34c6..166e15a 100644
+index cb85222..bebbc92 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
-@@ -2408,6 +2408,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+@@ -2411,6 +2411,8 @@ __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
static void
@@ -19103,7 +21226,7 @@ index 3ef34c6..166e15a 100644
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-@@ -2417,14 +2419,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
+@@ -2420,14 +2422,18 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
/*
* nothing needed if in group validation mode
*/
@@ -19124,7 +21247,7 @@ index 3ef34c6..166e15a 100644
xl = &excl_cntrs->states[tid];
-@@ -2464,6 +2470,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
+@@ -2467,6 +2473,8 @@ static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cnt
}
static void
@@ -19133,7 +21256,7 @@ index 3ef34c6..166e15a 100644
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-@@ -2473,13 +2481,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
+@@ -2476,13 +2484,18 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
/*
* nothing needed if in group validation mode
*/
@@ -19154,7 +21277,7 @@ index 3ef34c6..166e15a 100644
xl = &excl_cntrs->states[tid];
-@@ -2662,19 +2675,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
+@@ -2665,19 +2678,22 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
* unused now.
*/
if (hwc->idx >= 0) {
@@ -19179,8 +21302,8 @@ index 3ef34c6..166e15a 100644
raw_spin_unlock(&excl_cntrs->lock);
}
}
-@@ -3608,10 +3624,10 @@ __init int intel_pmu_init(void)
- x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
+@@ -3617,10 +3633,10 @@ __init int intel_pmu_init(void)
+ }
if (boot_cpu_has(X86_FEATURE_PDCM)) {
- u64 capabilities;
@@ -19236,10 +21359,10 @@ index 8f82b02..b10c4b0 100644
ret = perf_pmu_register(&intel_cqm_pmu, "intel_cqm", -1);
if (ret) {
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
-index 834262a..a1fc484 100644
+index fec8a46..0cc43ca 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
-@@ -95,14 +95,14 @@
+@@ -97,14 +97,14 @@
MODULE_LICENSE("GPL");
#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format) \
@@ -19258,10 +21381,10 @@ index 834262a..a1fc484 100644
static ssize_t cstate_get_attr_cpumask(struct device *dev,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
-index 8fc714b..0ce11c3 100644
+index be20239..99d75dd 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
-@@ -601,7 +601,7 @@ unlock:
+@@ -601,7 +601,7 @@ int intel_pmu_drain_bts_buffer(void)
static inline void intel_pmu_drain_pebs_buffer(void)
{
@@ -19270,7 +21393,7 @@ index 8fc714b..0ce11c3 100644
x86_pmu.drain_pebs(&regs);
}
-@@ -909,7 +909,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+@@ -947,7 +947,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
@@ -19279,7 +21402,7 @@ index 8fc714b..0ce11c3 100644
int is_64bit = 0;
void *kaddr;
int size;
-@@ -961,6 +961,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+@@ -999,6 +999,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
} else {
kaddr = (void *)to;
}
@@ -19287,7 +21410,7 @@ index 8fc714b..0ce11c3 100644
do {
struct insn insn;
-@@ -1120,7 +1121,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
+@@ -1158,7 +1159,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
@@ -19297,10 +21420,10 @@ index 8fc714b..0ce11c3 100644
} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
regs->flags |= PERF_EFLAGS_EXACT;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
-index 707d358..9eb1c4f 100644
+index 81b321a..ef54593 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
-@@ -811,7 +811,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
+@@ -805,7 +805,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
* Ensure we don't blindy read any address by validating it is
* a known text address.
*/
@@ -19309,7 +21432,7 @@ index 707d358..9eb1c4f 100644
addr = (void *)from;
/*
* Assume we can get the maximum possible size
-@@ -833,7 +833,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
+@@ -827,7 +827,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
#ifdef CONFIG_X86_64
is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
@@ -19319,10 +21442,10 @@ index 707d358..9eb1c4f 100644
if (!insn.opcode.got)
return X86_BR_ABORT;
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
-index 861a7d9..2ff89b2 100644
+index c5047b8..7297def 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
-@@ -172,11 +172,9 @@ static const struct attribute_group *pt_attr_groups[] = {
+@@ -174,11 +174,9 @@ static const struct attribute_group *pt_attr_groups[] = {
static int __init pt_pmu_hw_init(void)
{
@@ -19336,7 +21459,7 @@ index 861a7d9..2ff89b2 100644
long i;
rdmsrl(MSR_PLATFORM_INFO, reg);
-@@ -207,8 +205,6 @@ static int __init pt_pmu_hw_init(void)
+@@ -209,8 +207,6 @@ static int __init pt_pmu_hw_init(void)
pt_pmu.vmx = true;
}
@@ -19345,7 +21468,7 @@ index 861a7d9..2ff89b2 100644
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
&pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
-@@ -217,39 +213,25 @@ static int __init pt_pmu_hw_init(void)
+@@ -219,39 +215,25 @@ static int __init pt_pmu_hw_init(void)
&pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
}
@@ -19394,7 +21517,7 @@ index 861a7d9..2ff89b2 100644
#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC | \
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
-index 2886593..f191122 100644
+index 0a535ce..b8d9b16 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -117,14 +117,14 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
@@ -19415,7 +21538,7 @@ index 2886593..f191122 100644
__ATTR(_name, 0444, __rapl_##_var##_show, NULL)
#define RAPL_CNTR_WIDTH 32
-@@ -533,7 +533,7 @@ static struct attribute *rapl_events_knl_attr[] = {
+@@ -535,7 +535,7 @@ static struct attribute *rapl_events_knl_attr[] = {
NULL,
};
@@ -19425,10 +21548,10 @@ index 2886593..f191122 100644
.attrs = NULL, /* patched at runtime */
};
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
-index 463dc7a..4c8d08b 100644
+index 19d646a..e20a9b2 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
-@@ -90,8 +90,8 @@ end:
+@@ -90,8 +90,8 @@ struct pci2phy_map *__find_pci2phy_map(int segment)
return map;
}
@@ -19439,7 +21562,7 @@ index 463dc7a..4c8d08b 100644
{
struct uncore_event_desc *event =
container_of(attr, struct uncore_event_desc, attr);
-@@ -819,7 +819,7 @@ static void uncore_types_exit(struct intel_uncore_type **types)
+@@ -798,7 +798,7 @@ static void uncore_types_exit(struct intel_uncore_type **types)
static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
{
struct intel_uncore_pmu *pmus;
@@ -19449,11 +21572,11 @@ index 463dc7a..4c8d08b 100644
size_t size;
int i, j;
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
-index 78b9c23..2f5c61e 100644
+index ad986c1..9bb7016 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
-@@ -122,9 +122,9 @@ struct intel_uncore_box {
- #define UNCORE_BOX_FLAG_INITIATED 0
+@@ -124,9 +124,9 @@ struct intel_uncore_box {
+ #define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */
struct uncore_event_desc {
- struct kobj_attribute attr;
@@ -19464,7 +21587,7 @@ index 78b9c23..2f5c61e 100644
struct pci2phy_map {
struct list_head list;
-@@ -134,8 +134,8 @@ struct pci2phy_map {
+@@ -136,8 +136,8 @@ struct pci2phy_map {
struct pci2phy_map *__find_pci2phy_map(int segment);
@@ -19475,7 +21598,7 @@ index 78b9c23..2f5c61e 100644
#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
{ \
-@@ -144,14 +144,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
+@@ -146,14 +146,14 @@ ssize_t uncore_event_show(struct kobject *kobj,
}
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
@@ -19494,10 +21617,10 @@ index 78b9c23..2f5c61e 100644
static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
-index 4ab002d..5a30205 100644
+index bcbb1d2..d2511bf 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
-@@ -801,7 +801,7 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
+@@ -804,7 +804,7 @@ static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip)
regs->cs = kernel_ip(ip) ? __KERNEL_CS : __USER_CS;
if (regs->flags & X86_VM_MASK)
regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK);
@@ -19520,9 +21643,18 @@ index cb26f18..4f43f23 100644
set_fs(KERNEL_DS);
has_dumped = 1;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index 2f29f4e..ac453b4 100644
+index cb13c05..d63fa1e 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
+@@ -112,7 +112,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ return err;
+ }
+
+-asmlinkage long sys32_sigreturn(void)
++SYS32_SYSCALL_DEFINE0(sigreturn)
+ {
+ struct pt_regs *regs = current_pt_regs();
+ struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
@@ -123,7 +123,7 @@ asmlinkage long sys32_sigreturn(void)
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_COMPAT_NSIG_WORDS > 1
@@ -19532,6 +21664,15 @@ index 2f29f4e..ac453b4 100644
sizeof(frame->extramask))))
goto badframe;
+@@ -138,7 +138,7 @@ asmlinkage long sys32_sigreturn(void)
+ return 0;
+ }
+
+-asmlinkage long sys32_rt_sigreturn(void)
++SYS32_SYSCALL_DEFINE0(rt_sigreturn)
+ {
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe_ia32 __user *frame;
@@ -243,7 +243,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
sp -= frame_size;
/* Align the stack pointer according to the i386 ABI,
@@ -19595,12 +21736,12 @@ index 2f29f4e..ac453b4 100644
+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
} put_user_catch(err);
- err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
+ err |= __copy_siginfo_to_user32(&frame->info, &ksig->info, false);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index 719cd70..72af944 100644
+index 719cd70..113980a 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
-@@ -49,18 +49,26 @@
+@@ -49,18 +49,27 @@
#define AA(__x) ((unsigned long)(__x))
@@ -19612,12 +21753,14 @@ index 719cd70..72af944 100644
+ __builtin_memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high);
+ return retval;
+}
++
- asmlinkage long sys32_truncate64(const char __user *filename,
+-asmlinkage long sys32_truncate64(const char __user *filename,
- unsigned long offset_low,
- unsigned long offset_high)
-+ unsigned int offset_low,
-+ unsigned int offset_high)
++SYS32_SYSCALL_DEFINE3(truncate64, const char __user *, filename,
++ unsigned int, offset_low,
++ unsigned int, offset_high)
{
- return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
+ return sys_truncate(filename, compose_loff(offset_high, offset_low));
@@ -19625,15 +21768,15 @@ index 719cd70..72af944 100644
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
- unsigned long offset_high)
-+asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned int offset_low,
-+ unsigned int offset_high)
++SYS32_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, unsigned int, offset_low,
++ unsigned int, offset_high)
{
- return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
+ return sys_ftruncate(fd, ((unsigned long) offset_high << 32) | offset_low);
}
/*
-@@ -69,8 +77,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+@@ -69,8 +78,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
*/
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
{
@@ -19644,8 +21787,94 @@ index 719cd70..72af944 100644
SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
-@@ -196,29 +204,29 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
- __u32 len_low, __u32 len_high, int advice)
+@@ -95,8 +104,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ return 0;
+ }
+
+-asmlinkage long sys32_stat64(const char __user *filename,
+- struct stat64 __user *statbuf)
++SYS32_SYSCALL_DEFINE2(stat64, const char __user *, filename,
++ struct stat64 __user *, statbuf)
+ {
+ struct kstat stat;
+ int ret = vfs_stat(filename, &stat);
+@@ -106,8 +115,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
+ return ret;
+ }
+
+-asmlinkage long sys32_lstat64(const char __user *filename,
+- struct stat64 __user *statbuf)
++SYS32_SYSCALL_DEFINE2(lstat64, const char __user *, filename,
++ struct stat64 __user *, statbuf)
+ {
+ struct kstat stat;
+ int ret = vfs_lstat(filename, &stat);
+@@ -116,7 +125,7 @@ asmlinkage long sys32_lstat64(const char __user *filename,
+ return ret;
+ }
+
+-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
++SYS32_SYSCALL_DEFINE2(fstat64, unsigned int, fd, struct stat64 __user *, statbuf)
+ {
+ struct kstat stat;
+ int ret = vfs_fstat(fd, &stat);
+@@ -125,8 +134,8 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+ return ret;
+ }
+
+-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
+- struct stat64 __user *statbuf, int flag)
++SYS32_SYSCALL_DEFINE4(fstatat, unsigned int, dfd, const char __user *, filename,
++ struct stat64 __user *, statbuf, int, flag)
+ {
+ struct kstat stat;
+ int error;
+@@ -152,7 +161,7 @@ struct mmap_arg_struct32 {
+ unsigned int offset;
+ };
+
+-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
++SYS32_SYSCALL_DEFINE1(mmap, struct mmap_arg_struct32 __user *, arg)
+ {
+ struct mmap_arg_struct32 a;
+
+@@ -166,22 +175,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+ a.offset>>PAGE_SHIFT);
+ }
+
+-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
+- int options)
++SYS32_SYSCALL_DEFINE3(waitpid, compat_pid_t, pid, unsigned int __user *, stat_addr,
++ int, options)
+ {
+ return compat_sys_wait4(pid, stat_addr, options, NULL);
+ }
+
+ /* warning: next two assume little endian */
+-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
+- u32 poslo, u32 poshi)
++SYS32_SYSCALL_DEFINE5(pread, unsigned int, fd, char __user *, ubuf, u32, count,
++ u32, poslo, u32, poshi)
+ {
+ return sys_pread64(fd, ubuf, count,
+ ((loff_t)AA(poshi) << 32) | AA(poslo));
+ }
+
+-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
+- u32 count, u32 poslo, u32 poshi)
++SYS32_SYSCALL_DEFINE5(pwrite, unsigned int, fd, const char __user *, ubuf,
++ u32, count, u32, poslo, u32, poshi)
+ {
+ return sys_pwrite64(fd, ubuf, count,
+ ((loff_t)AA(poshi) << 32) | AA(poslo));
+@@ -192,40 +201,40 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
+ * Some system calls that need sign extended arguments. This could be
+ * done by a generic wrapper.
+ */
+-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+- __u32 len_low, __u32 len_high, int advice)
++SYS32_SYSCALL_DEFINE6(fadvise64_64, int, fd, __u32, offset_low, __u32, offset_high,
++ __u32, len_low, __u32, len_high, int, advice)
{
return sys_fadvise64_64(fd,
- (((u64)offset_high)<<32) | offset_low,
@@ -19655,15 +21884,19 @@ index 719cd70..72af944 100644
advice);
}
- asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
- size_t count)
+-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
+- size_t count)
++SYS32_SYSCALL_DEFINE4(readahead, int, fd, unsigned, off_lo, unsigned, off_hi,
++ size_t, count)
{
- return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
+ return sys_readahead(fd, compose_loff(off_hi, off_lo), count);
}
- asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
- unsigned n_low, unsigned n_hi, int flags)
+-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+- unsigned n_low, unsigned n_hi, int flags)
++SYS32_SYSCALL_DEFINE6(sync_file_range, int, fd, unsigned, off_low, unsigned, off_hi,
++ unsigned, n_low, unsigned, n_hi, int, flags)
{
return sys_sync_file_range(fd,
- ((u64)off_hi << 32) | off_low,
@@ -19672,18 +21905,22 @@ index 719cd70..72af944 100644
+ compose_loff(n_hi, n_low), flags);
}
- asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
+-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
- size_t len, int advice)
-+ int len, int advice)
++SYS32_SYSCALL_DEFINE5(fadvise64, int, fd, unsigned, offset_lo, unsigned, offset_hi,
++ int, len, int, advice)
{
- return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
+ return sys_fadvise64_64(fd, compose_loff(offset_hi, offset_lo),
len, advice);
}
-@@ -226,6 +234,6 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
- unsigned offset_hi, unsigned len_lo,
- unsigned len_hi)
+-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+- unsigned offset_hi, unsigned len_lo,
+- unsigned len_hi)
++SYS32_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, unsigned, offset_lo,
++ unsigned, offset_hi, unsigned, len_lo,
++ unsigned, len_hi)
{
- return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
- ((u64)len_hi << 32) | len_lo);
@@ -19691,64 +21928,135 @@ index 719cd70..72af944 100644
+ compose_loff(len_hi, len_lo));
}
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index e7636ba..b9d3a6d 100644
+index e7636ba..f5c86c4 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
-@@ -4,6 +4,7 @@
+@@ -3,7 +3,9 @@
+
#ifdef __ASSEMBLY__
++#include <linux/linkage.h>
#include <asm/asm.h>
+#include <asm/irq_vectors.h>
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
-@@ -18,6 +19,45 @@
+@@ -18,6 +20,114 @@
.endm
#endif
++.macro pax_force_retaddr_bts rip=0
+#ifdef KERNEXEC_PLUGIN
-+ .macro pax_force_retaddr_bts rip=0
+ btsq $63,\rip(%rsp)
-+ .endm
++#endif
++.endm
++
++#if defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS) && defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#error PAX: the KERNEXEC BTS and OR methods must not be enabled at once
++#endif
++
++.macro pax_force_retaddr rip=0
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
-+ .macro pax_force_retaddr rip=0, reload=0
+ btsq $63,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
-+ btsq $63,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
+#endif
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .if \reload
-+ pax_set_fptr_mask
-+ .endif
+ orq %r12,\rip(%rsp)
-+ .endm
-+ .macro pax_force_fptr ptr
++#endif
++.endm
++
++.macro pax_force_fptr ptr
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ btsq $63,\ptr
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ orq %r12,\ptr
-+ .endm
-+ .macro pax_set_fptr_mask
++#endif
++.endm
++
++.macro pax_set_fptr_mask
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ movabs $0x8000000000000000,%r12
-+ .endm
+#endif
++.endm
++
++#ifdef CONFIG_PAX_RAP
++.macro rap_call target hash="" sym=""
++
++ jmp 2001f
++ .ifb \hash
++ __ASM_RAP_RET_HASH(__rap_hash_ret_\target)
++ .else
++ __ASM_RAP_RET_HASH(__rap_hash_ret_\hash)
++ .endif
++ .skip 8-(2002f-2001f),0xcc
++
++ .ifnb \sym
++ .globl \sym
++\sym :
++ .endif
++
++2001: call \target
++2002:
++.endm
++
++.macro rap_retloc caller
++ __ASM_RAP_RET_HASH(__rap_hash_ret_\caller)
++ .skip 8-(2002f-2001f),0xcc
++2001: call \caller
++2002:
++.endm
++
++.macro rap_ret func
++ ret
++.endm
++#endif
++
++.macro pax_direct_call_global target sym
++#ifdef CONFIG_PAX_RAP
++ rap_call \target, , \sym
+#else
-+ .macro pax_force_retaddr rip=0, reload=0
-+ .endm
-+ .macro pax_force_fptr ptr
-+ .endm
-+ .macro pax_force_retaddr_bts rip=0
-+ .endm
-+ .macro pax_set_fptr_mask
-+ .endm
++ .globl \sym
++\sym :
++ call \target
+#endif
++.endm
++
++.macro pax_indirect_call target extra
++#ifdef CONFIG_PAX_RAP
++ rap_call "*\target" hash=\extra
++#else
++ call *\target
++#endif
++.endm
++
++.macro pax_direct_call target
++#ifdef CONFIG_PAX_RAP
++ rap_call \target
++#else
++ call \target
++#endif
++.endm
++
++.macro pax_retloc caller
++#ifdef CONFIG_PAX_RAP
++ rap_retloc \caller
++#else
++#endif
++.endm
++
++.macro pax_ret func
++ pax_force_retaddr
++#ifdef CONFIG_PAX_RAP
++ rap_ret \func
++#else
++ ret
++#endif
++.endm
+
/*
* Issue one struct alt_instr descriptor entry (need to put it into
* the section .altinstructions, see below). This entry contains
-@@ -50,7 +90,7 @@
+@@ -50,7 +160,7 @@
altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
.popsection
@@ -19757,7 +22065,7 @@ index e7636ba..b9d3a6d 100644
143:
\newinstr
144:
-@@ -86,7 +126,7 @@
+@@ -86,7 +196,7 @@
altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
.popsection
@@ -19766,7 +22074,7 @@ index e7636ba..b9d3a6d 100644
143:
\newinstr1
144:
-@@ -95,6 +135,26 @@
+@@ -95,6 +205,26 @@
.popsection
.endm
@@ -19794,7 +22102,7 @@ index e7636ba..b9d3a6d 100644
#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index e77a644..6bbec6f 100644
+index 1b02038..d42eaf6 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -7,6 +7,7 @@
@@ -19805,7 +22113,26 @@ index e77a644..6bbec6f 100644
/*
* Alternative inline assembly for SMP.
-@@ -137,7 +138,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+@@ -84,6 +85,18 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ }
+ #endif /* CONFIG_SMP */
+
++#ifdef CONFIG_PAX_RAP
++#define PAX_DIRECT_CALL(target) "rap_direct_call " target
++#define PAX_DIRECT_CALL_HASH(target, hash) "rap_direct_call " target " " hash
++#define PAX_INDIRECT_CALL(target, extra) "rap_indirect_call " target " " extra
++#define PAX_RET(extra) "rap_ret " extra
++#else
++#define PAX_DIRECT_CALL(target) "call " target
++#define PAX_DIRECT_CALL_HASH(target, hash) "call " target
++#define PAX_INDIRECT_CALL(target, extra) "call " target
++#define PAX_RET(extra) "ret"
++#endif
++
+ #define b_replacement(num) "664"#num
+ #define e_replacement(num) "665"#num
+
+@@ -137,7 +150,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
@@ -19814,7 +22141,7 @@ index e77a644..6bbec6f 100644
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
-@@ -147,7 +148,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+@@ -147,7 +160,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
@@ -19823,7 +22150,27 @@ index e77a644..6bbec6f 100644
ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
-@@ -234,6 +235,35 @@ static inline int alternatives_text_reserved(void *start, void *end)
+@@ -206,7 +219,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+
+ /* Like alternative_io, but for replacing a direct call with another one. */
+ #define alternative_call(oldfunc, newfunc, feature, output, input...) \
+- asm volatile (ALTERNATIVE("call %P[old]", "call %P[new]", feature) \
++ asm volatile (ALTERNATIVE(PAX_DIRECT_CALL("%P[old]"), PAX_DIRECT_CALL("%P[new]"), feature) \
+ : output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
+
+ /*
+@@ -219,8 +232,8 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ output, input...) \
+ { \
+ register void *__sp asm(_ASM_SP); \
+- asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+- "call %P[new2]", feature2) \
++ asm volatile (ALTERNATIVE_2(PAX_DIRECT_CALL("%P[old]"), PAX_DIRECT_CALL("%P[new1]"), feature1,\
++ PAX_DIRECT_CALL("%P[new2]"), feature2) \
+ : output, "+r" (__sp) \
+ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input); \
+@@ -238,6 +251,35 @@ static inline int alternatives_text_reserved(void *start, void *end)
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
@@ -19860,7 +22207,7 @@ index e77a644..6bbec6f 100644
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index 1243577..302ac39 100644
+index f5aaf6c..6f23982 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -49,7 +49,7 @@ static inline void generic_apic_probe(void)
@@ -19894,6 +22241,19 @@ index 93eebc63..6a64395 100644
"setc %%bl\n\t"
"popl %%ebp\n\t"
"popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+index 44b8762..59e9d90 100644
+--- a/arch/x86/include/asm/asm-prototypes.h
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -11,6 +11,8 @@
+ #include <asm/special_insns.h>
+ #include <asm/preempt.h>
+
++#include <asm/desc.h>
++
+ #ifndef CONFIG_X86_CMPXCHG64
+ extern void cmpxchg8b_emu(void);
+ #endif
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 7acb51c..46ba0b3 100644
--- a/arch/x86/include/asm/asm.h
@@ -20329,7 +22689,7 @@ index 14635c5..199ea31 100644
* @v: pointer to type int
*
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index 71d7705..99a1fe8 100644
+index 71d7705..02bb244 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -8,9 +8,17 @@
@@ -20351,6 +22711,15 @@ index 71d7705..99a1fe8 100644
#define ATOMIC64_INIT(val) { (val) }
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+@@ -23,7 +31,7 @@ typedef struct {
+
+ #ifdef CONFIG_X86_CMPXCHG64
+ #define __alternative_atomic64(f, g, out, in...) \
+- asm volatile("call %P[func]" \
++ asm volatile(PAX_DIRECT_CALL("%P[func]") \
+ : out : [func] "i" (atomic64_##g##_cx8), ## in)
+
+ #define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
@@ -36,21 +44,31 @@ typedef struct {
ATOMIC64_DECL_ONE(sym##_386)
@@ -20935,16 +23304,25 @@ index 48f99f1..26ab08a 100644
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
-index 7b53743..5f207d2 100644
+index 7b53743..5745aa1 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
+@@ -16,7 +16,7 @@
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+-asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
++asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum) __rap_hash;
+
+ /*
+ * the same as csum_partial, but copies from src while it
@@ -30,6 +30,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
+ int len, __wsum sum,
-+ int *src_err_ptr, int *dst_err_ptr);
++ int *src_err_ptr, int *dst_err_ptr) __rap_hash;
+
+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
+ int len, __wsum sum,
@@ -20972,7 +23350,7 @@ index 7b53743..5f207d2 100644
clac();
return ret;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
-index 9733361..49bda42 100644
+index 97848cd..9ccfae9 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -15,8 +15,12 @@ extern void __cmpxchg_wrong_size(void)
@@ -21021,18 +23399,40 @@ index 9733361..49bda42 100644
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
* Since this is generally used to protect other memory information, we
-@@ -166,6 +196,9 @@ extern void __add_wrong_size(void)
- #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
- #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+@@ -162,6 +192,9 @@ extern void __add_wrong_size(void)
+ #define __xadd(ptr, inc, lock) __xchg_op((ptr), (inc), xadd, lock)
+ #define xadd(ptr, inc) __xadd((ptr), (inc), LOCK_PREFIX)
+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
+
- #define __add(ptr, inc, lock) \
- ({ \
- __typeof__ (*(ptr)) __ret = (inc); \
+ #define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2) \
+ ({ \
+ bool __ret; \
+diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
+index e4959d0..c62dbc2 100644
+--- a/arch/x86/include/asm/cmpxchg_32.h
++++ b/arch/x86/include/asm/cmpxchg_32.h
+@@ -81,7 +81,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+ __typeof__(*(ptr)) __old = (o); \
+ __typeof__(*(ptr)) __new = (n); \
+ alternative_io(LOCK_PREFIX_HERE \
+- "call cmpxchg8b_emu", \
++ PAX_DIRECT_CALL("cmpxchg8b_emu"), \
+ "lock; cmpxchg8b (%%esi)" , \
+ X86_FEATURE_CX8, \
+ "=A" (__ret), \
+@@ -97,7 +97,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(*(ptr)) __old = (o); \
+ __typeof__(*(ptr)) __new = (n); \
+- alternative_io("call cmpxchg8b_emu", \
++ alternative_io(PAX_DIRECT_CALL("cmpxchg8b_emu"), \
+ "cmpxchg8b (%%esi)" , \
+ X86_FEATURE_CX8, \
+ "=A" (__ret), \
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
-index a188061..280d840 100644
+index 24118c0..55d73de 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -42,7 +42,11 @@ typedef u32 compat_uint_t;
@@ -21061,10 +23461,10 @@ index 1d2b69f..8ca35d6 100644
"5:\n"
".previous\n"
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
-index 92a8308..4e44144 100644
+index ed10b5b..95be661 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
-@@ -205,7 +205,8 @@
+@@ -206,7 +206,8 @@
#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
@@ -21074,7 +23474,7 @@ index 92a8308..4e44144 100644
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-@@ -213,7 +214,7 @@
+@@ -214,7 +215,7 @@
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -21084,10 +23484,10 @@ index 92a8308..4e44144 100644
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
diff --git a/arch/x86/include/asm/crypto/camellia.h b/arch/x86/include/asm/crypto/camellia.h
-index bb93333..e3d3d57 100644
+index bb93333..ee113c0 100644
--- a/arch/x86/include/asm/crypto/camellia.h
+++ b/arch/x86/include/asm/crypto/camellia.h
-@@ -39,34 +39,35 @@ extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
+@@ -39,34 +39,41 @@ extern int xts_camellia_setkey(struct crypto_tfm *tfm, const u8 *key,
/* regular block cipher functions */
asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
const u8 *src, bool xor);
@@ -21109,6 +23509,12 @@ index bb93333..e3d3d57 100644
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+asmlinkage void camellia_ecb_dec_16way(void *ctx, u8 *dst,
const u8 *src);
++void roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __camellia_enc_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __camellia_dec_blk16(void *ctx, u8 *dst, const u8 *src) __rap_hash;
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
+asmlinkage void camellia_cbc_dec_16way(void *ctx, u8 *dst,
@@ -21135,7 +23541,7 @@ index bb93333..e3d3d57 100644
__camellia_enc_blk(ctx, dst, src, false);
}
-@@ -76,9 +77,10 @@ static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
+@@ -76,9 +83,10 @@ static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
__camellia_enc_blk(ctx, dst, src, true);
}
@@ -21147,7 +23553,7 @@ index bb93333..e3d3d57 100644
__camellia_enc_blk_2way(ctx, dst, src, false);
}
-@@ -89,7 +91,7 @@ static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+@@ -89,7 +97,7 @@ static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
}
/* glue helpers */
@@ -21183,10 +23589,10 @@ index 03bb106..9e7a45c 100644
struct common_glue_func_entry {
unsigned int num_blocks; /* number of blocks that @fn will process */
diff --git a/arch/x86/include/asm/crypto/serpent-avx.h b/arch/x86/include/asm/crypto/serpent-avx.h
-index 33c2b8a..586871f 100644
+index 33c2b8a..21976b7 100644
--- a/arch/x86/include/asm/crypto/serpent-avx.h
+++ b/arch/x86/include/asm/crypto/serpent-avx.h
-@@ -16,20 +16,20 @@ struct serpent_xts_ctx {
+@@ -16,20 +16,22 @@ struct serpent_xts_ctx {
struct serpent_ctx crypt_ctx;
};
@@ -21196,6 +23602,8 @@ index 33c2b8a..586871f 100644
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(void *ctx, u8 *dst,
const u8 *src);
++void __serpent_enc_blk8_avx(void *ctx, u8 *dst, const u8 *src) __rap_hash;
++void __serpent_dec_blk8_avx(void *ctx, u8 *dst, const u8 *src) __rap_hash;
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(void *ctx, u8 *dst,
@@ -21285,8 +23693,24 @@ index 878c51c..86fc65f 100644
extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
le128 *iv);
extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
+index 9476c04..8d1cda4 100644
+--- a/arch/x86/include/asm/current.h
++++ b/arch/x86/include/asm/current.h
+@@ -16,6 +16,11 @@ static __always_inline struct task_struct *get_current(void)
+
+ #define current get_current()
+
++#else
++
++#define GET_CURRENT(reg) \
++ _ASM_MOV PER_CPU_VAR(current_task),reg ;
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _ASM_X86_CURRENT_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index 4e10d73..7319a47 100644
+index 12080d8..7319a47 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -4,6 +4,7 @@
@@ -21310,7 +23734,8 @@ index 4e10d73..7319a47 100644
extern struct desc_ptr idt_descr;
-extern gate_desc idt_table[];
--extern struct desc_ptr debug_idt_descr;
++extern gate_desc idt_table[IDT_ENTRIES];
+ extern const struct desc_ptr debug_idt_descr;
-extern gate_desc debug_idt_table[];
-
-struct gdt_page {
@@ -21318,8 +23743,6 @@ index 4e10d73..7319a47 100644
-} __attribute__((aligned(PAGE_SIZE)));
-
-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
-+extern gate_desc idt_table[IDT_ENTRIES];
-+extern const struct desc_ptr debug_idt_descr;
+extern gate_desc debug_idt_table[IDT_ENTRIES];
+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
@@ -21569,10 +23992,10 @@ index fe884e1..46149ae 100644
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
-index d0bb76d..bb192fc 100644
+index 389d700..fa51266 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
-@@ -151,6 +151,11 @@ static inline bool efi_is_native(void)
+@@ -150,6 +150,11 @@ static inline bool efi_is_native(void)
static inline bool efi_runtime_supported(void)
{
@@ -21948,7 +24371,7 @@ index 48df486..e32babd 100644
#endif /* _ASM_X86_FPU_H */
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
-index 19f30a8..d0561c13 100644
+index 430bacf..d0fbcf0 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -43,6 +43,7 @@
@@ -22046,13 +24469,13 @@ index b90e105..30a5950 100644
extern void elcr_set_level_irq(unsigned int irq);
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
-index 055ea99..7dabb68 100644
+index 67942b6..176a8b4 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
-@@ -43,7 +43,7 @@ struct hypervisor_x86 {
+@@ -46,7 +46,7 @@ struct hypervisor_x86 {
- /* X2APIC detection (run once per boot) */
- bool (*x2apic_available)(void);
+ /* pin current vcpu to specified physical cpu (run rarely) */
+ void (*pin_vcpu)(int);
-};
+} __do_const;
@@ -22072,7 +24495,7 @@ index 39bcefc..272d904 100644
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index de25aad..dc04476 100644
+index d34bd37..08f3231 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -42,6 +42,7 @@
@@ -22137,23 +24560,34 @@ index de25aad..dc04476 100644
* Convert a virtual cached pointer to an uncached pointer
*/
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
-index 6ca9fd6..4c0aa55 100644
+index 6ca9fd6..4dbd5e2 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
-@@ -48,6 +48,8 @@
+@@ -48,6 +48,10 @@
#define IA32_SYSCALL_VECTOR 0x80
+#define X86_REFCOUNT_VECTOR 0x81 /* Refcount Overflow or Underflow Exception */
++//#define X86_RAP_CALL_VECTOR 0x82 /* RAP Indirect Call Violation Exception */
++//#define X86_RAP_RET_VECTOR 0x83 /* RAP Function Return Violation Exception */
+
/*
* Vectors 0x30-0x3f are used for ISA interrupts.
* round up to the next 16-vector boundary
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
-index b77f5ed..cbf5ec6 100644
+index ac7692d..90e119c 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
-@@ -23,11 +23,17 @@ static inline unsigned long native_save_fl(void)
+@@ -12,7 +12,7 @@
+ * Interrupt control:
+ */
+
+-static inline unsigned long native_save_fl(void)
++static inline asmlinkage unsigned long native_save_fl(void)
+ {
+ unsigned long flags;
+
+@@ -27,23 +27,29 @@ static inline unsigned long native_save_fl(void)
: /* no input */
: "memory");
@@ -22171,7 +24605,21 @@ index b77f5ed..cbf5ec6 100644
asm volatile("push %0 ; popf"
: /* no output */
:"g" (flags)
-@@ -137,6 +143,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
+ :"memory", "cc");
+ }
+
+-static inline void native_irq_disable(void)
++static inline asmlinkage void native_irq_disable(void)
+ {
+ asm volatile("cli": : :"memory");
+ }
+
+-static inline void native_irq_enable(void)
++static inline asmlinkage void native_irq_enable(void)
+ {
+ asm volatile("sti": : :"memory");
+ }
+@@ -141,6 +147,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
swapgs; \
sysretl
@@ -22183,6 +24631,21 @@ index b77f5ed..cbf5ec6 100644
#else
#define INTERRUPT_RETURN iret
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+@@ -197,6 +208,14 @@ static inline int arch_irqs_disabled(void)
+ # define LOCKDEP_SYS_EXIT
+ # define LOCKDEP_SYS_EXIT_IRQ
+ #endif
++#else
++#ifdef CONFIG_TRACE_IRQFLAGS
++void trace_hardirqs_on_thunk(void);
++void trace_hardirqs_off_thunk(void);
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void lockdep_sys_exit_thunk(void);
++#endif
+ #endif /* __ASSEMBLY__ */
+
+ #endif
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d1d1e50..5bacb6d 100644
--- a/arch/x86/include/asm/kprobes.h
@@ -22373,10 +24836,10 @@ index 7511978..cf52573 100644
#define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
-index 8bf766e..d800b61 100644
+index 9bd7ff5..d9c8715 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
-@@ -184,7 +184,7 @@ struct mca_msr_regs {
+@@ -187,7 +187,7 @@ struct mca_msr_regs {
u32 (*status) (int bank);
u32 (*addr) (int bank);
u32 (*misc) (int bank);
@@ -22407,7 +24870,7 @@ index 0000000..2bfd3ba
+
+#endif /* X86_MMAN_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 1ea0bae..25de747 100644
+index 72198c6..2049a36 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -19,7 +19,19 @@ typedef struct {
@@ -22432,10 +24895,10 @@ index 1ea0bae..25de747 100644
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index d8abfcf..721da30 100644
+index 8e0a9fe..2473467 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
-@@ -46,7 +46,7 @@ struct ldt_struct {
+@@ -47,7 +47,7 @@ struct ldt_struct {
* allocations, but it's not worth trying to optimize.
*/
struct desc_struct *entries;
@@ -22444,7 +24907,7 @@ index d8abfcf..721da30 100644
};
/*
-@@ -58,6 +58,23 @@ void destroy_context_ldt(struct mm_struct *mm);
+@@ -59,6 +59,23 @@ void destroy_context_ldt(struct mm_struct *mm);
static inline int init_new_context_ldt(struct task_struct *tsk,
struct mm_struct *mm)
{
@@ -22468,7 +24931,7 @@ index d8abfcf..721da30 100644
return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) {}
-@@ -98,6 +115,20 @@ static inline void load_mm_ldt(struct mm_struct *mm)
+@@ -99,6 +116,20 @@ static inline void load_mm_ldt(struct mm_struct *mm)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -22530,6 +24993,50 @@ index e3b7819..ba128ec 100644
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_RAP
+
#endif /* _ASM_X86_MODULE_H */
+diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
+index e9355a8..f031759 100644
+--- a/arch/x86/include/asm/mutex_32.h
++++ b/arch/x86/include/asm/mutex_32.h
+@@ -30,7 +30,7 @@ do { \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
+ " jns 1f \n" \
+- " call " #fail_fn "\n" \
++ PAX_DIRECT_CALL(#fail_fn)"\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+@@ -76,7 +76,7 @@ do { \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
+ " jg 1f\n" \
+- " call " #fail_fn "\n" \
++ PAX_DIRECT_CALL(#fail_fn)"\n" \
+ "1:\n" \
+ : "=a" (dummy) \
+ : "a" (count) \
+diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
+index d985075..d2fc470 100644
+--- a/arch/x86/include/asm/mutex_64.h
++++ b/arch/x86/include/asm/mutex_64.h
+@@ -39,7 +39,7 @@ do { \
+ \
+ asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
+ " jns 1f \n" \
+- " call " #fail_fn "\n" \
++ PAX_DIRECT_CALL(#fail_fn)"\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
+@@ -94,7 +94,7 @@ do { \
+ \
+ asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
+ " jg 1f\n" \
+- " call " #fail_fn "\n" \
++ PAX_DIRECT_CALL(#fail_fn)"\n" \
+ "1:" \
+ : "=D" (dummy) \
+ : "D" (v) \
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 5f2fc44..106caa6 100644
--- a/arch/x86/include/asm/nmi.h
@@ -22672,10 +25179,74 @@ index b3bebf9..2c3570f 100644
#define __phys_reloc_hide(x) (x)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 2970d22..fce32bd 100644
+index ce93281..17b0fda 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
-@@ -509,7 +509,7 @@ static inline pmd_t __pmd(pmdval_t val)
+@@ -374,11 +374,11 @@ static inline pte_t __pte(pteval_t val)
+
+ if (sizeof(pteval_t) > sizeof(long))
+ ret = PVOP_CALLEE2(pteval_t,
+- pv_mmu_ops.make_pte,
++ pv_mmu_ops, make_pte,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALLEE1(pteval_t,
+- pv_mmu_ops.make_pte,
++ pv_mmu_ops, make_pte,
+ val);
+
+ return (pte_t) { .pte = ret };
+@@ -389,10 +389,10 @@ static inline pteval_t pte_val(pte_t pte)
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
++ ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops, pte_val,
+ pte.pte, (u64)pte.pte >> 32);
+ else
+- ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
++ ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops, pte_val,
+ pte.pte);
+
+ return ret;
+@@ -403,10 +403,10 @@ static inline pgd_t __pgd(pgdval_t val)
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
++ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops, make_pgd,
+ val, (u64)val >> 32);
+ else
+- ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
++ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops, make_pgd,
+ val);
+
+ return (pgd_t) { ret };
+@@ -417,10 +417,10 @@ static inline pgdval_t pgd_val(pgd_t pgd)
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
++ ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops, pgd_val,
+ pgd.pgd, (u64)pgd.pgd >> 32);
+ else
+- ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
++ ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops, pgd_val,
+ pgd.pgd);
+
+ return ret;
+@@ -496,24 +496,24 @@ static inline pmd_t __pmd(pmdval_t val)
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
++ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops, make_pmd,
+ val, (u64)val >> 32);
+ else
+- ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
++ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops, make_pmd,
+ val);
+
return (pmd_t) { ret };
}
@@ -22684,7 +25255,43 @@ index 2970d22..fce32bd 100644
{
pmdval_t ret;
-@@ -575,6 +575,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+ if (sizeof(pmdval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
++ ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops, pmd_val,
+ pmd.pmd, (u64)pmd.pmd >> 32);
+ else
+- ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
++ ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops, pmd_val,
+ pmd.pmd);
+
+ return ret;
+@@ -536,10 +536,10 @@ static inline pud_t __pud(pudval_t val)
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
++ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops, make_pud,
+ val, (u64)val >> 32);
+ else
+- ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
++ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops, make_pud,
+ val);
+
+ return (pud_t) { ret };
+@@ -550,10 +550,10 @@ static inline pudval_t pud_val(pud_t pud)
+ pudval_t ret;
+
+ if (sizeof(pudval_t) > sizeof(long))
+- ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
++ ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops, pud_val,
+ pud.pud, (u64)pud.pud >> 32);
+ else
+- ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
++ ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops, pud_val,
+ pud.pud);
+
+ return ret;
+@@ -571,6 +571,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
val);
}
@@ -22703,7 +25310,7 @@ index 2970d22..fce32bd 100644
static inline void pgd_clear(pgd_t *pgdp)
{
set_pgd(pgdp, __pgd(0));
-@@ -659,6 +671,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+@@ -655,6 +667,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
pv_mmu_ops.set_fixmap(idx, phys, flags);
}
@@ -22724,43 +25331,230 @@ index 2970d22..fce32bd 100644
+
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
- #ifdef CONFIG_QUEUED_SPINLOCKS
-@@ -886,7 +913,7 @@ extern void default_banner(void);
+ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+@@ -665,7 +692,7 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+
+ static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+ {
+- PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
++ PVOP_VCALLEE1(pv_lock_ops, queued_spin_unlock, lock);
+ }
+
+ static __always_inline void pv_wait(u8 *ptr, u8 val)
+@@ -735,7 +762,7 @@ static __always_inline void pv_kick(int cpu)
+ */
+ #define PV_THUNK_NAME(func) "__raw_callee_save_" #func
+ #define PV_CALLEE_SAVE_REGS_THUNK(func) \
+- extern typeof(func) __raw_callee_save_##func; \
++ extern typeof(func) __raw_callee_save_##func __rap_hash; \
+ \
+ asm(".pushsection .text;" \
+ ".globl " PV_THUNK_NAME(func) ";" \
+@@ -743,38 +770,42 @@ static __always_inline void pv_kick(int cpu)
+ PV_THUNK_NAME(func) ":" \
+ FRAME_BEGIN \
+ PV_SAVE_ALL_CALLER_REGS \
+- "call " #func ";" \
++ PAX_DIRECT_CALL_HASH(#func, PV_THUNK_NAME(func)) ";" \
+ PV_RESTORE_ALL_CALLER_REGS \
+ FRAME_END \
+- "ret;" \
++ PAX_RET(PV_THUNK_NAME(func))";" \
+ ".popsection")
+
+ /* Get a reference to a callee-save function */
+-#define PV_CALLEE_SAVE(func) \
+- ((struct paravirt_callee_save) { __raw_callee_save_##func })
++#define PV_CALLEE_SAVE(field, func) \
++ ((union paravirt_callee_save) { .field = __raw_callee_save_##func })
+
++#ifdef CONFIG_PAX_RAP
++#define __PV_IS_CALLEE_SAVE(field, func) PV_CALLEE_SAVE(field, func)
++#else
+ /* Promise that "func" already uses the right calling convention */
+-#define __PV_IS_CALLEE_SAVE(func) \
+- ((struct paravirt_callee_save) { func })
++#define __PV_IS_CALLEE_SAVE(field, func) \
++ ((union paravirt_callee_save) { .field = func })
++#endif
+
+ static inline notrace unsigned long arch_local_save_flags(void)
+ {
+- return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
++ return PVOP_CALLEE0(unsigned long, pv_irq_ops, save_fl);
+ }
+
+ static inline notrace void arch_local_irq_restore(unsigned long f)
+ {
+- PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
++ PVOP_VCALLEE1(pv_irq_ops, restore_fl, f);
+ }
+
+ static inline notrace void arch_local_irq_disable(void)
+ {
+- PVOP_VCALLEE0(pv_irq_ops.irq_disable);
++ PVOP_VCALLEE0(pv_irq_ops, irq_disable);
+ }
+
+ static inline notrace void arch_local_irq_enable(void)
+ {
+- PVOP_VCALLEE0(pv_irq_ops.irq_enable);
++ PVOP_VCALLEE0(pv_irq_ops, irq_enable);
+ }
+
+ static inline notrace unsigned long arch_local_irq_save(void)
+@@ -806,9 +837,9 @@ extern void default_banner(void);
+
+ #else /* __ASSEMBLY__ */
+
+-#define _PVSITE(ptype, clobbers, ops, word, algn) \
++#define _PVSITE(ptype, clobbers, word, algn, ...)\
+ 771:; \
+- ops; \
++ __VA_ARGS__; \
+ 772:; \
+ .pushsection .parainstructions,"a"; \
+ .align algn; \
+@@ -848,8 +879,10 @@ extern void default_banner(void);
+ COND_POP(set, CLBR_RAX, rax)
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
+-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+-#define PARA_INDIRECT(addr) *addr(%rip)
++#define PARA_SITE(ptype, clobbers, ...) _PVSITE(ptype, clobbers, .quad, 8, __VA_ARGS__)
++#define PARA_INDIRECT(addr) addr(%rip)
++#define PV_INDIRECT_CALL(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr
++#define PV_INDIRECT_CALL_CALLEE_SAVE(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr##.##addr
+ #else
+ #define PV_SAVE_REGS(set) \
+ COND_PUSH(set, CLBR_EAX, eax); \
+@@ -863,30 +896,32 @@ extern void default_banner(void);
+ COND_POP(set, CLBR_EAX, eax)
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
- #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
-#define PARA_INDIRECT(addr) *%cs:addr
-+#define PARA_INDIRECT(addr) *%ss:addr
++#define PARA_SITE(ptype, clobbers, ...) _PVSITE(ptype, clobbers, .long, 4, __VA_ARGS__)
++#define PARA_INDIRECT(addr) %ss:addr
++#define PV_INDIRECT_CALL(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr
++#define PV_INDIRECT_CALL_CALLEE_SAVE(ops, OPS, addr) pax_indirect_call PARA_INDIRECT(pv_##ops##_ops+PV_##OPS##_##addr), pv_##ops##_ops.##addr##.##addr
#endif
#define INTERRUPT_RETURN \
-@@ -944,6 +971,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
++ jmp *PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
+
+ #define DISABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
++ PV_INDIRECT_CALL_CALLEE_SAVE(irq,IRQ,irq_disable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #define ENABLE_INTERRUPTS(clobbers) \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+ PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
++ PV_INDIRECT_CALL_CALLEE_SAVE(irq,IRQ,irq_enable); \
+ PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+
+ #ifdef CONFIG_X86_32
+ #define GET_CR0_INTO_EAX \
+ push %ecx; push %edx; \
+- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ PV_INDIRECT_CALL(cpu,CPU,read_cr0); \
+ pop %edx; pop %ecx
+ #else /* !CONFIG_X86_32 */
+
+@@ -907,21 +942,36 @@ extern void default_banner(void);
+ */
+ #define SWAPGS \
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
+- call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs) \
++ PV_INDIRECT_CALL(cpu,CPU,swapgs) \
+ )
+
+ #define GET_CR2_INTO_RAX \
+- call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)
++ PV_INDIRECT_CALL(mmu,MMU,read_cr2)
+
+ #define PARAVIRT_ADJUST_EXCEPTION_FRAME \
+ PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
+ CLBR_NONE, \
+- call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
++ PV_INDIRECT_CALL(irq,IRQ,adjust_exception_frame))
+
+ #define USERGS_SYSRET64 \
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+- jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
++ jmp *PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
+
+#define GET_CR0_INTO_RDI \
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ PV_INDIRECT_CALL(cpu,CPU,read_cr0); \
+ mov %rax,%rdi
+
+#define SET_RDI_INTO_CR0 \
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++ PV_INDIRECT_CALL(cpu,CPU,write_cr0)
+
+#define GET_CR3_INTO_RDI \
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ PV_INDIRECT_CALL(mmu,MMU,read_cr3); \
+ mov %rax,%rdi
+
+#define SET_RDI_INTO_CR3 \
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++ PV_INDIRECT_CALL(mmu,MMU,write_cr3)
+
#endif /* CONFIG_X86_32 */
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index 7fa9e77..aa09e68 100644
+index 0f400c0..3e5329e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
-@@ -83,7 +83,7 @@ struct pv_init_ops {
+@@ -39,6 +39,7 @@
+
+ #ifndef __ASSEMBLY__
+
++#include <linux/linkage.h>
+ #include <asm/desc_defs.h>
+ #include <asm/kmap_types.h>
+ #include <asm/pgtable_types.h>
+@@ -51,14 +52,29 @@ struct mm_struct;
+ struct desc_struct;
+ struct task_struct;
+ struct cpumask;
++struct qspinlock;
+
+ /*
+ * Wrapper type for pointers to code which uses the non-standard
+- * calling convention. See PV_CALL_SAVE_REGS_THUNK below.
++ * calling convention. See PV_CALLEE_SAVE_REGS_THUNK below.
+ */
+-struct paravirt_callee_save {
+- void *func;
+-};
++union paravirt_callee_save {
++ void (*queued_spin_unlock)(struct qspinlock *);
++
++ asmlinkage unsigned long (*save_fl)(void);
++ void (*restore_fl)(unsigned long);
++ asmlinkage void (*irq_disable)(void);
++ asmlinkage void (*irq_enable)(void);
++
++ pteval_t (*pte_val)(pte_t);
++ pte_t (*make_pte)(pteval_t);
++ pmdval_t (*pmd_val)(pmd_t);
++ pmd_t (*make_pmd)(pmdval_t);
++ pudval_t (*pud_val)(pud_t);
++ pud_t (*make_pud)(pmdval_t);
++ pgdval_t (*pgd_val)(pgd_t);
++ pgd_t (*make_pgd)(pgdval_t);
++} __no_const;
+
+ /* general info */
+ struct pv_info {
+@@ -83,7 +99,7 @@ struct pv_init_ops {
*/
unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
unsigned long addr, unsigned len);
@@ -22769,48 +25563,91 @@ index 7fa9e77..aa09e68 100644
struct pv_lazy_ops {
-@@ -91,12 +91,12 @@ struct pv_lazy_ops {
+@@ -91,12 +107,12 @@ struct pv_lazy_ops {
void (*enter)(void);
void (*leave)(void);
void (*flush)(void);
-};
-+} __no_randomize_layout;
++} __no_const __no_randomize_layout;
struct pv_time_ops {
unsigned long long (*sched_clock)(void);
unsigned long long (*steal_clock)(int cpu);
-};
-+} __no_const __no_randomize_layout;
++} __rap_hash __no_const __no_randomize_layout;
struct pv_cpu_ops {
/* hooks for various privileged instructions */
-@@ -178,7 +178,7 @@ struct pv_cpu_ops {
+@@ -177,7 +193,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
-};
-+} __no_const __no_randomize_layout;
++} __rap_hash __no_const __no_randomize_layout;
struct pv_irq_ops {
/*
-@@ -201,7 +201,7 @@ struct pv_irq_ops {
+@@ -189,10 +205,10 @@ struct pv_irq_ops {
+ * NOTE: These functions callers expect the callee to preserve
+ * more registers than the standard C calling convention.
+ */
+- struct paravirt_callee_save save_fl;
+- struct paravirt_callee_save restore_fl;
+- struct paravirt_callee_save irq_disable;
+- struct paravirt_callee_save irq_enable;
++ union paravirt_callee_save save_fl;
++ union paravirt_callee_save restore_fl;
++ union paravirt_callee_save irq_disable;
++ union paravirt_callee_save irq_enable;
+
+ void (*safe_halt)(void);
+ void (*halt)(void);
+@@ -200,7 +216,7 @@ struct pv_irq_ops {
#ifdef CONFIG_X86_64
void (*adjust_exception_frame)(void);
#endif
-};
-+} __no_randomize_layout;
++} __rap_hash __no_randomize_layout;
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
-@@ -285,6 +285,7 @@ struct pv_mmu_ops {
- struct paravirt_callee_save make_pud;
+@@ -259,11 +275,11 @@ struct pv_mmu_ops {
+ void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+- struct paravirt_callee_save pte_val;
+- struct paravirt_callee_save make_pte;
++ union paravirt_callee_save pte_val;
++ union paravirt_callee_save make_pte;
+
+- struct paravirt_callee_save pgd_val;
+- struct paravirt_callee_save make_pgd;
++ union paravirt_callee_save pgd_val;
++ union paravirt_callee_save make_pgd;
+
+ #if CONFIG_PGTABLE_LEVELS >= 3
+ #ifdef CONFIG_X86_PAE
+@@ -276,14 +292,15 @@ struct pv_mmu_ops {
+
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
+
+- struct paravirt_callee_save pmd_val;
+- struct paravirt_callee_save make_pmd;
++ union paravirt_callee_save pmd_val;
++ union paravirt_callee_save make_pmd;
+
+ #if CONFIG_PGTABLE_LEVELS == 4
+- struct paravirt_callee_save pud_val;
+- struct paravirt_callee_save make_pud;
++ union paravirt_callee_save pud_val;
++ union paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
#endif /* CONFIG_PGTABLE_LEVELS == 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-@@ -296,7 +297,13 @@ struct pv_mmu_ops {
+@@ -295,7 +312,13 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -22821,16 +25658,21 @@ index 7fa9e77..aa09e68 100644
+ unsigned long (*pax_close_kernel)(void);
+#endif
+
-+} __no_randomize_layout;
++} __rap_hash __no_randomize_layout;
struct arch_spinlock;
#ifdef CONFIG_SMP
-@@ -318,11 +325,14 @@ struct pv_lock_ops {
- struct paravirt_callee_save lock_spinning;
- void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
- #endif /* !CONFIG_QUEUED_SPINLOCKS */
+@@ -306,15 +329,18 @@ struct qspinlock;
+
+ struct pv_lock_ops {
+ void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+- struct paravirt_callee_save queued_spin_unlock;
++ union paravirt_callee_save queued_spin_unlock;
+
+ void (*wait)(u8 *ptr, u8 val);
+ void (*kick)(int cpu);
-};
-+} __no_randomize_layout;
++} __rap_hash __no_randomize_layout;
/* This contains all the paravirt structures: we get a convenient
* number for each function using the offset which we use to indicate
@@ -22842,7 +25684,7 @@ index 7fa9e77..aa09e68 100644
struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
-@@ -330,7 +340,7 @@ struct paravirt_patch_template {
+@@ -322,7 +348,7 @@ struct paravirt_patch_template {
struct pv_irq_ops pv_irq_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
@@ -22851,6 +25693,131 @@ index 7fa9e77..aa09e68 100644
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
+@@ -391,7 +417,7 @@ int paravirt_disable_iospace(void);
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+-#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
++#define PARAVIRT_CALL(op) PAX_INDIRECT_CALL("*%c[paravirt_opptr]", #op) ";"
+
+ /*
+ * These macros are intended to wrap calls through one of the paravirt
+@@ -518,7 +544,7 @@ int paravirt_disable_iospace(void);
+ /* since this condition will never hold */ \
+ if (sizeof(rettype) > sizeof(unsigned long)) { \
+ asm volatile(pre \
+- paravirt_alt(PARAVIRT_CALL) \
++ paravirt_alt(PARAVIRT_CALL(op)) \
+ post \
+ : call_clbr, "+r" (__sp) \
+ : paravirt_type(op), \
+@@ -528,7 +554,7 @@ int paravirt_disable_iospace(void);
+ __ret = (rettype)((((u64)__edx) << 32) | __eax); \
+ } else { \
+ asm volatile(pre \
+- paravirt_alt(PARAVIRT_CALL) \
++ paravirt_alt(PARAVIRT_CALL(op)) \
+ post \
+ : call_clbr, "+r" (__sp) \
+ : paravirt_type(op), \
+@@ -544,8 +570,8 @@ int paravirt_disable_iospace(void);
+ ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
+ EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
+- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
++#define __PVOP_CALLEESAVE(rettype, op, func, pre, post, ...) \
++ ____PVOP_CALL(rettype, op.func.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+@@ -555,7 +581,7 @@ int paravirt_disable_iospace(void);
+ PVOP_VCALL_ARGS; \
+ PVOP_TEST_NULL(op); \
+ asm volatile(pre \
+- paravirt_alt(PARAVIRT_CALL) \
++ paravirt_alt(PARAVIRT_CALL(op)) \
+ post \
+ : call_clbr, "+r" (__sp) \
+ : paravirt_type(op), \
+@@ -569,8 +595,8 @@ int paravirt_disable_iospace(void);
+ VEXTRA_CLOBBERS, \
+ pre, post, ##__VA_ARGS__)
+
+-#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
+- ____PVOP_VCALL(op.func, CLBR_RET_REG, \
++#define __PVOP_VCALLEESAVE(op, func, pre, post, ...) \
++ ____PVOP_VCALL(op.func.func, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , \
+ pre, post, ##__VA_ARGS__)
+
+@@ -581,10 +607,10 @@ int paravirt_disable_iospace(void);
+ #define PVOP_VCALL0(op) \
+ __PVOP_VCALL(op, "", "")
+
+-#define PVOP_CALLEE0(rettype, op) \
+- __PVOP_CALLEESAVE(rettype, op, "", "")
+-#define PVOP_VCALLEE0(op) \
+- __PVOP_VCALLEESAVE(op, "", "")
++#define PVOP_CALLEE0(rettype, op, func) \
++ __PVOP_CALLEESAVE(rettype, op, func, "", "")
++#define PVOP_VCALLEE0(op, func) \
++ __PVOP_VCALLEESAVE(op, func, "", "")
+
+
+ #define PVOP_CALL1(rettype, op, arg1) \
+@@ -592,10 +618,10 @@ int paravirt_disable_iospace(void);
+ #define PVOP_VCALL1(op, arg1) \
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+-#define PVOP_CALLEE1(rettype, op, arg1) \
+- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+-#define PVOP_VCALLEE1(op, arg1) \
+- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
++#define PVOP_CALLEE1(rettype, op, func, arg1) \
++ __PVOP_CALLEESAVE(rettype, op, func, "", "", PVOP_CALL_ARG1(arg1))
++#define PVOP_VCALLEE1(op, func, arg1) \
++ __PVOP_VCALLEESAVE(op, func, "", "", PVOP_CALL_ARG1(arg1))
+
+
+ #define PVOP_CALL2(rettype, op, arg1, arg2) \
+@@ -605,11 +631,11 @@ int paravirt_disable_iospace(void);
+ __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+-#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
+- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
++#define PVOP_CALLEE2(rettype, op, func, arg1, arg2) \
++ __PVOP_CALLEESAVE(rettype, op, func, "", "", PVOP_CALL_ARG1(arg1),\
+ PVOP_CALL_ARG2(arg2))
+-#define PVOP_VCALLEE2(op, arg1, arg2) \
+- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
++#define PVOP_VCALLEE2(op, func, arg1, arg2) \
++ __PVOP_VCALLEESAVE(op, func, "", "", PVOP_CALL_ARG1(arg1), \
+ PVOP_CALL_ARG2(arg2))
+
+
+diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
+index 84f58de..610576f 100644
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -493,7 +493,7 @@ do { \
+ bool __ret; \
+ typeof(pcp1) __o1 = (o1), __n1 = (n1); \
+ typeof(pcp2) __o2 = (o2), __n2 = (n2); \
+- alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
++ alternative_io("leaq %P1,%%rsi\n\t" PAX_DIRECT_CALL("this_cpu_cmpxchg16b_emu")"\n\t", \
+ "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t", \
+ X86_FEATURE_CX16, \
+ ASM_OUTPUT2("=a" (__ret), "+m" (pcp1), \
+@@ -501,6 +501,7 @@ do { \
+ "b" (__n1), "c" (__n2), "a" (__o1) : "rsi"); \
+ __ret; \
+ })
++bool this_cpu_cmpxchg16b_emu(void *, void*, long, long, long, long) __rap_hash;
+
+ #define raw_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
+ #define this_cpu_cmpxchg_double_8 percpu_cmpxchg16b_double
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index b6d4259..da6324e 100644
--- a/arch/x86/include/asm/pgalloc.h
@@ -23320,10 +26287,10 @@ index 1cc82ec..ba29fd8 100644
}
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
-index 6fdef9e..7cda9d5 100644
+index 3a26420..ad31bde 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
-@@ -67,11 +67,16 @@ typedef struct { pteval_t pte; } pte_t;
+@@ -69,11 +69,16 @@ typedef struct { pteval_t pte; } pte_t;
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
@@ -23341,7 +26308,7 @@ index 6fdef9e..7cda9d5 100644
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index f1218f5..b0cafcd 100644
+index 8b4de22..eaf50b8 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -112,10 +112,14 @@
@@ -23433,7 +26400,7 @@ index f1218f5..b0cafcd 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
-index 643eba4..0dbfcf5 100644
+index 2c1ebeb..69c4605 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -38,7 +38,7 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
@@ -23459,7 +26426,7 @@ index 17f2186..f394307 100644
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 63def95..3d8c203 100644
+index 984a7bf..c3e410d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -135,7 +135,7 @@ struct cpuinfo_x86 {
@@ -23471,6 +26438,15 @@ index 63def95..3d8c203 100644
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
+@@ -159,7 +159,7 @@ extern __u32 cpu_caps_cleared[NCAPINTS];
+ extern __u32 cpu_caps_set[NCAPINTS];
+
+ #ifdef CONFIG_SMP
+-DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
++DECLARE_PER_CPU_READ_ONLY(struct cpuinfo_x86, cpu_info);
+ #define cpu_data(cpu) per_cpu(cpu_info, cpu)
+ #else
+ #define cpu_info boot_cpu_data
@@ -205,9 +205,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
@@ -23507,15 +26483,23 @@ index 63def95..3d8c203 100644
/*
* Save the original ist values for checking stack pointers during debugging
-@@ -388,6 +398,7 @@ struct thread_struct {
+@@ -340,6 +350,7 @@ DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
+ DECLARE_INIT_PER_CPU(irq_stack_union);
+
+ DECLARE_PER_CPU(char *, irq_stack_ptr);
++DECLARE_PER_CPU(char *, irq_stack_ptr_lowmem);
+ DECLARE_PER_CPU(unsigned int, irq_count);
+ extern asmlinkage void ignore_sysret(void);
+ #else /* X86_64 */
+@@ -388,6 +399,7 @@ struct thread_struct {
unsigned short ds;
unsigned short fsindex;
unsigned short gsindex;
+ unsigned short ss;
#endif
- #ifdef CONFIG_X86_32
- unsigned long ip;
-@@ -404,6 +415,9 @@ struct thread_struct {
+
+ u32 status; /* thread synchronous flags */
+@@ -404,6 +416,9 @@ struct thread_struct {
unsigned long gs;
#endif
@@ -23525,12 +26509,12 @@ index 63def95..3d8c203 100644
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
-@@ -424,18 +438,9 @@ struct thread_struct {
- /* Max allowed port in the bitmap, in bytes: */
+@@ -425,17 +440,11 @@ struct thread_struct {
unsigned io_bitmap_max;
-- mm_segment_t addr_limit;
--
+ mm_segment_t addr_limit;
++ unsigned long lowest_stack;
+
unsigned int sig_on_uaccess_err:1;
unsigned int uaccess_err:1; /* uaccess failed */
-
@@ -23544,8 +26528,8 @@ index 63def95..3d8c203 100644
+} __randomize_layout;
/*
- * Set IOPL bits in EFLAGS from given mask
-@@ -478,12 +483,8 @@ static inline void native_swapgs(void)
+ * Thread-synchronous status.
+@@ -487,12 +496,8 @@ static inline void native_swapgs(void)
static inline unsigned long current_top_of_stack(void)
{
@@ -23558,7 +26542,7 @@ index 63def95..3d8c203 100644
}
#ifdef CONFIG_PARAVIRT
-@@ -708,20 +709,29 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -717,20 +722,30 @@ static inline void spin_lock_prefetch(const void *x)
#define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
TOP_OF_KERNEL_STACK_PADDING)
@@ -23585,12 +26569,12 @@ index 63def95..3d8c203 100644
.sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
-- .addr_limit = KERNEL_DS, \
+ .addr_limit = KERNEL_DS, \
+ .fpu.state = &init_fpregs_state, \
}
- extern unsigned long thread_saved_pc(struct task_struct *tsk);
-@@ -736,12 +746,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ /*
+@@ -743,12 +758,7 @@ static inline void spin_lock_prefetch(const void *x)
* "struct pt_regs" is possible, but they may contain the
* completely wrong values.
*/
@@ -23604,7 +26588,7 @@ index 63def95..3d8c203 100644
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-@@ -755,13 +760,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -762,13 +772,13 @@ static inline void spin_lock_prefetch(const void *x)
* particular problem by preventing anything from being mapped
* at the maximum canonical address.
*/
@@ -23620,16 +26604,15 @@ index 63def95..3d8c203 100644
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -773,7 +778,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
-
+@@ -781,6 +791,7 @@ static inline void spin_lock_prefetch(const void *x)
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
-- .addr_limit = KERNEL_DS, \
+ .addr_limit = KERNEL_DS, \
+ .fpu.state = &init_fpregs_state, \
}
- /*
-@@ -796,6 +801,10 @@ extern void start_thread(stru