summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAnthony G. Basile <blueness@gentoo.org>2015-10-08 10:26:36 -0400
committerAnthony G. Basile <blueness@gentoo.org>2015-10-08 10:26:36 -0400
commitf4064d61a86ad6ef0907aab64f2e2769b86f1a7f (patch)
treee28e215208b3dc5cf0736753dd950875a3c2472e
parentgrsecurity-3.1-4.1.7-201509201149 (diff)
downloadhardened-patchset-f4064d61a86ad6ef0907aab64f2e2769b86f1a7f.tar.gz
hardened-patchset-f4064d61a86ad6ef0907aab64f2e2769b86f1a7f.tar.bz2
hardened-patchset-f4064d61a86ad6ef0907aab64f2e2769b86f1a7f.zip
grsecurity-3.1-4.2.3-20151007223020151007
-rw-r--r--4.2.3/0000_README (renamed from 4.1.7/0000_README)2
-rw-r--r--4.2.3/4420_grsecurity-3.1-4.2.3-201510072230.patch (renamed from 4.1.7/4420_grsecurity-3.1-4.1.7-201509201149.patch)78601
-rw-r--r--4.2.3/4425_grsec_remove_EI_PAX.patch (renamed from 4.1.7/4425_grsec_remove_EI_PAX.patch)2
-rw-r--r--4.2.3/4427_force_XATTR_PAX_tmpfs.patch (renamed from 4.1.7/4427_force_XATTR_PAX_tmpfs.patch)4
-rw-r--r--4.2.3/4430_grsec-remove-localversion-grsec.patch (renamed from 4.1.7/4430_grsec-remove-localversion-grsec.patch)0
-rw-r--r--4.2.3/4435_grsec-mute-warnings.patch (renamed from 4.1.7/4435_grsec-mute-warnings.patch)0
-rw-r--r--4.2.3/4440_grsec-remove-protected-paths.patch (renamed from 4.1.7/4440_grsec-remove-protected-paths.patch)0
-rw-r--r--4.2.3/4450_grsec-kconfig-default-gids.patch (renamed from 4.1.7/4450_grsec-kconfig-default-gids.patch)8
-rw-r--r--4.2.3/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 4.1.7/4465_selinux-avc_audit-log-curr_ip.patch)0
-rw-r--r--4.2.3/4470_disable-compat_vdso.patch (renamed from 4.1.7/4470_disable-compat_vdso.patch)2
-rw-r--r--4.2.3/4475_emutramp_default_on.patch (renamed from 4.1.7/4475_emutramp_default_on.patch)4
11 files changed, 39442 insertions, 39181 deletions
diff --git a/4.1.7/0000_README b/4.2.3/0000_README
index 29c7482..08d9f55 100644
--- a/4.1.7/0000_README
+++ b/4.2.3/0000_README
@@ -2,7 +2,7 @@ README
-----------------------------------------------------------------------------
Individual Patch Descriptions:
-----------------------------------------------------------------------------
-Patch: 4420_grsecurity-3.1-4.1.7-201509201149.patch
+Patch: 4420_grsecurity-3.1-4.2.3-201510072230.patch
From: http://www.grsecurity.net
Desc: hardened-sources base patch from upstream grsecurity
diff --git a/4.1.7/4420_grsecurity-3.1-4.1.7-201509201149.patch b/4.2.3/4420_grsecurity-3.1-4.2.3-201510072230.patch
index 4694239..b4b589d 100644
--- a/4.1.7/4420_grsecurity-3.1-4.1.7-201509201149.patch
+++ b/4.2.3/4420_grsecurity-3.1-4.2.3-201510072230.patch
@@ -235,7 +235,7 @@ index 9de9813..1462492 100644
+zconf.lex.c
zoffset.h
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
-index d2b1c40..3e90a74 100644
+index 13f888a..250729b 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
@@ -313,10 +313,10 @@ index d2b1c40..3e90a74 100644
A typical pattern in a Kbuild file looks like this:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
-index cd03a0f..b8d72be 100644
+index 1d6f045..2714987 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
-@@ -1223,6 +1223,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -1244,6 +1244,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
Default: 1024
@@ -330,7 +330,7 @@ index cd03a0f..b8d72be 100644
hashdist= [KNL,NUMA] Large hashes allocated during boot
are distributed across NUMA nodes. Defaults on
for 64-bit NUMA, off otherwise.
-@@ -2341,6 +2348,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2364,6 +2371,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noexec=on: enable non-executable mappings (default)
noexec=off: disable non-executable mappings
@@ -341,7 +341,7 @@ index cd03a0f..b8d72be 100644
nosmap [X86]
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
-@@ -2639,6 +2650,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+@@ -2662,6 +2673,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
the specified number of seconds. This is to be used if
your oopses keep scrolling off the screen.
@@ -373,7 +373,7 @@ index cd03a0f..b8d72be 100644
pcd. [PARIDE]
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
-index c831001..1bfbbf6 100644
+index 6fccb69..60c7c7a 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
@@ -406,10 +406,10 @@ index c831001..1bfbbf6 100644
A toggle value indicating if modules are allowed to be loaded
diff --git a/Makefile b/Makefile
-index b8591e5..1d9e8c0 100644
+index a6edbb1..5ac7686 100644
--- a/Makefile
+++ b/Makefile
-@@ -299,7 +299,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
HOSTCC = gcc
HOSTCXX = g++
HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
@@ -420,7 +420,7 @@ index b8591e5..1d9e8c0 100644
ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
-@@ -444,8 +446,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+@@ -434,8 +436,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
# Rules shared between *config targets and build targets
# Basic helpers built in scripts/
@@ -431,7 +431,7 @@ index b8591e5..1d9e8c0 100644
$(Q)$(MAKE) $(build)=scripts/basic
$(Q)rm -f .tmp_quiet_recordmcount
-@@ -620,6 +622,74 @@ endif
+@@ -615,6 +617,74 @@ endif
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
@@ -506,7 +506,7 @@ index b8591e5..1d9e8c0 100644
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
-@@ -712,7 +782,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
else
KBUILD_CFLAGS += -g
endif
@@ -515,7 +515,7 @@ index b8591e5..1d9e8c0 100644
endif
ifdef CONFIG_DEBUG_INFO_DWARF4
KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
-@@ -884,7 +954,7 @@ export mod_sign_cmd
+@@ -886,7 +956,7 @@ export mod_sign_cmd
ifeq ($(KBUILD_EXTMOD),)
@@ -524,7 +524,7 @@ index b8591e5..1d9e8c0 100644
vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
-@@ -934,6 +1004,8 @@ endif
+@@ -936,6 +1006,8 @@ endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -533,7 +533,7 @@ index b8591e5..1d9e8c0 100644
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Handle descending into subdirectories listed in $(vmlinux-dirs)
-@@ -943,7 +1015,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+@@ -945,7 +1017,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
# Error messages still appears in the original language
PHONY += $(vmlinux-dirs)
@@ -542,7 +542,7 @@ index b8591e5..1d9e8c0 100644
$(Q)$(MAKE) $(build)=$@
define filechk_kernel.release
-@@ -986,10 +1058,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+@@ -988,10 +1060,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
archprepare: archheaders archscripts prepare1 scripts_basic
@@ -556,7 +556,7 @@ index b8591e5..1d9e8c0 100644
prepare: prepare0
# Generate some files
-@@ -1097,6 +1172,8 @@ all: modules
+@@ -1099,6 +1174,8 @@ all: modules
# using awk while concatenating to the final file.
PHONY += modules
@@ -565,7 +565,7 @@ index b8591e5..1d9e8c0 100644
modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
$(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
@$(kecho) ' Building modules, stage 2.';
-@@ -1112,7 +1189,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+@@ -1114,7 +1191,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
# Target to prepare building external modules
PHONY += modules_prepare
@@ -574,7 +574,7 @@ index b8591e5..1d9e8c0 100644
# Target to install modules
PHONY += modules_install
-@@ -1178,7 +1255,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+@@ -1180,7 +1257,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
signing_key.priv signing_key.x509 x509.genkey \
extra_certificates signing_key.x509.keyid \
@@ -586,7 +586,7 @@ index b8591e5..1d9e8c0 100644
# clean - Delete most, but leave enough to build external modules
#
-@@ -1217,7 +1297,7 @@ distclean: mrproper
+@@ -1219,7 +1299,7 @@ distclean: mrproper
@find $(srctree) $(RCS_FIND_IGNORE) \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -595,7 +595,7 @@ index b8591e5..1d9e8c0 100644
-type f -print | xargs rm -f
-@@ -1383,6 +1463,8 @@ PHONY += $(module-dirs) modules
+@@ -1385,6 +1465,8 @@ PHONY += $(module-dirs) modules
$(module-dirs): crmodverdir $(objtree)/Module.symvers
$(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
@@ -604,7 +604,7 @@ index b8591e5..1d9e8c0 100644
modules: $(module-dirs)
@$(kecho) ' Building modules, stage 2.';
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-@@ -1523,17 +1605,21 @@ else
+@@ -1525,17 +1607,21 @@ else
target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
endif
@@ -630,7 +630,7 @@ index b8591e5..1d9e8c0 100644
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
%.symtypes: %.c prepare scripts FORCE
$(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-@@ -1545,11 +1631,15 @@ endif
+@@ -1547,11 +1633,15 @@ endif
$(build)=$(build-dir)
# Make sure the latest headers are built for Documentation
Documentation/: headers_install
@@ -819,10 +819,10 @@ index 36dc91a..6769cb0 100644
return addr;
}
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
-index 9d0ac09..479a962 100644
+index 4a905bd..0a4da53 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
-@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
__reload_thread(pcb);
}
@@ -947,7 +947,7 @@ index 9d0ac09..479a962 100644
/*
* This routine handles page faults. It determines the address,
-@@ -133,8 +251,29 @@ retry:
+@@ -132,8 +250,29 @@ retry:
good_area:
si_code = SEGV_ACCERR;
if (cause < 0) {
@@ -979,10 +979,10 @@ index 9d0ac09..479a962 100644
/* Allow reads even for write-only mappings */
if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 45df48b..952017a 100644
+index ede2526..9e12300 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1716,7 +1716,7 @@ config ALIGNMENT_TRAP
+@@ -1770,7 +1770,7 @@ config ALIGNMENT_TRAP
config UACCESS_WITH_MEMCPY
bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
@@ -991,28 +991,14 @@ index 45df48b..952017a 100644
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
-@@ -1951,6 +1951,7 @@ config XIP_PHYS_ADDR
- config KEXEC
+@@ -2006,6 +2006,7 @@ config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
+ depends on !CPU_V7M
+ depends on !GRKERNSEC_KMEM
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-diff --git a/arch/arm/Makefile b/arch/arm/Makefile
-index 985227c..8acc029 100644
---- a/arch/arm/Makefile
-+++ b/arch/arm/Makefile
-@@ -304,6 +304,9 @@ INSTALL_TARGETS = zinstall uinstall install
-
- PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
-
-+bootpImage uImage: zImage
-+zImage: Image
-+
- $(BOOT_TARGETS): vmlinux
- $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..abe7041 100644
--- a/arch/arm/include/asm/atomic.h
@@ -1579,7 +1565,7 @@ index e22c119..abe7041 100644
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
-index d2f81e6..3c4dba5 100644
+index 6c2327e..85beac4 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -67,7 +67,7 @@
@@ -1615,7 +1601,7 @@ index 75fe66b..ba3dee4 100644
#endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
-index 2d46862..a35415b 100644
+index 4812cda..9da8116 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
@@ -1653,15 +1639,17 @@ index 5233151..87a71fa 100644
/*
* Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
-index abb2c37..96db950 100644
+index 1692a05..1835802 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
-@@ -104,6 +104,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
-
- #define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-+#define xchg_unchecked(ptr,x) \
-+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+@@ -107,6 +107,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+ })
++#define xchg_unchecked(ptr, x) ({ \
++ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
++ sizeof(*(ptr))); \
++})
#include <asm-generic/cmpxchg-local.h>
@@ -1774,7 +1762,7 @@ index de53547..52b9a28 100644
(unsigned long)(dest_buf) + (size)); \
\
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
-index 4e78065..f265b48 100644
+index 5eed828..365e018 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
@@ -1795,40 +1783,40 @@ index 4e78065..f265b48 100644
*uval = val;
return ret;
}
-@@ -93,6 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+@@ -94,6 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
+ preempt_disable();
+ pax_open_userland();
+
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
-@@ -103,6 +109,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+@@ -104,6 +110,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ pax_close_userland();
+
*uval = val;
- return ret;
- }
-@@ -125,6 +133,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
- return -EFAULT;
+ preempt_enable();
- pagefault_disable(); /* implies preempt_disable() */
+@@ -131,6 +139,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ preempt_disable();
+ #endif
+ pagefault_disable();
+ pax_open_userland();
switch (op) {
case FUTEX_OP_SET:
-@@ -146,6 +155,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+@@ -152,6 +161,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
ret = -ENOSYS;
}
+ pax_close_userland();
- pagefault_enable(); /* subsumes preempt_enable() */
-
- if (!ret) {
+ pagefault_enable();
+ #ifndef CONFIG_SMP
+ preempt_enable();
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
@@ -2001,7 +1989,7 @@ index 5e68278..1869bae 100644
#define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
#define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
-index bfd662e..f6cbb02 100644
+index aeddd28..207745c 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -127,6 +127,9 @@
@@ -2013,7 +2001,7 @@ index bfd662e..f6cbb02 100644
+
/*
* These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a745a2a..481350a 100644
--- a/arch/arm/include/asm/pgtable-3level.h
@@ -2146,10 +2134,10 @@ index c25ef3e..735f14b 100644
extern struct psci_operations psci_ops;
extern struct smp_operations psci_smp_ops;
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
-index 18f5a55..5072a40 100644
+index 2f3ac1b..67182ae0 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
-@@ -107,7 +107,7 @@ struct smp_operations {
+@@ -108,7 +108,7 @@ struct smp_operations {
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
@@ -2224,7 +2212,7 @@ index 5f833f7..76e6644 100644
}
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
-index 74b17d0..57a4bf4 100644
+index 74b17d0..7e6da4b 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -18,6 +18,7 @@
@@ -2353,10 +2341,13 @@ index 74b17d0..57a4bf4 100644
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
-+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
++extern unsigned long __must_check __size_overflow(3) ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check __size_overflow(3) ___copy_to_user(void __user *to, const void *from, unsigned long n);
+
-+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long ret;
+
@@ -2378,10 +2369,9 @@ index 74b17d0..57a4bf4 100644
+ return ret;
+}
+
- extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
--extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
- extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
++extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check __size_overflow(2) ___clear_user(void __user *addr, unsigned long n);
++extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
+{
@@ -2429,10 +2419,10 @@ index 5af0ed1..cea83883 100644
#define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
-index a88671c..1cc895e 100644
+index 5e5a51a..b21eeef 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
-@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+@@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
/* networking */
EXPORT_SYMBOL(csum_partial);
@@ -2441,7 +2431,7 @@ index a88671c..1cc895e 100644
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);
-@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero);
+@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
@@ -2468,10 +2458,10 @@ index 318da33..373689f 100644
/**
* arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
-index 570306c..c87f193 100644
+index cb4fb1e..dc7fcaf 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -48,6 +48,87 @@
+@@ -50,6 +50,87 @@
9997:
.endm
@@ -2559,7 +2549,7 @@ index 570306c..c87f193 100644
.macro pabt_helper
@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
-@@ -90,11 +171,15 @@
+@@ -92,11 +173,15 @@
* Invalid mode handlers
*/
.macro inv_entry, reason
@@ -2575,7 +2565,7 @@ index 570306c..c87f193 100644
mov r1, #\reason
.endm
-@@ -150,7 +235,11 @@ ENDPROC(__und_invalid)
+@@ -152,7 +237,11 @@ ENDPROC(__und_invalid)
.macro svc_entry, stack_hole=0, trace=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
@@ -2587,7 +2577,7 @@ index 570306c..c87f193 100644
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
-@@ -165,7 +254,12 @@ ENDPROC(__und_invalid)
+@@ -167,7 +256,12 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
@@ -2600,7 +2590,7 @@ index 570306c..c87f193 100644
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
-@@ -369,6 +463,9 @@ ENDPROC(__fiq_abt)
+@@ -371,6 +465,9 @@ ENDPROC(__fiq_abt)
.macro usr_entry, trace=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
@@ -2610,7 +2600,7 @@ index 570306c..c87f193 100644
sub sp, sp, #S_FRAME_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
-@@ -479,7 +576,9 @@ __und_usr:
+@@ -481,7 +578,9 @@ __und_usr:
tst r3, #PSR_T_BIT @ Thumb mode?
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
@@ -2620,7 +2610,7 @@ index 570306c..c87f193 100644
ARM_BE8(rev r0, r0) @ little endian instruction
@ r0 = 32-bit ARM instruction which caused the exception
-@@ -513,11 +612,15 @@ __und_usr_thumb:
+@@ -515,11 +614,15 @@ __und_usr_thumb:
*/
.arch armv6t2
#endif
@@ -2636,7 +2626,7 @@ index 570306c..c87f193 100644
ARM_BE8(rev16 r0, r0) @ little endian instruction
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
-@@ -547,7 +650,8 @@ ENDPROC(__und_usr)
+@@ -549,7 +652,8 @@ ENDPROC(__und_usr)
*/
.pushsection .text.fixup, "ax"
.align 2
@@ -2646,7 +2636,7 @@ index 570306c..c87f193 100644
ret r9
.popsection
.pushsection __ex_table,"a"
-@@ -767,7 +871,7 @@ ENTRY(__switch_to)
+@@ -769,7 +873,7 @@ ENTRY(__switch_to)
THUMB( str lr, [ip], #4 )
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
@@ -2655,7 +2645,7 @@ index 570306c..c87f193 100644
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
-@@ -776,7 +880,7 @@ ENTRY(__switch_to)
+@@ -778,7 +882,7 @@ ENTRY(__switch_to)
ldr r8, =__stack_chk_guard
ldr r7, [r7, #TSK_STACK_CANARY]
#endif
@@ -2665,7 +2655,7 @@ index 570306c..c87f193 100644
#endif
mov r5, r0
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
-index 4e7f40c..0f9ee2c 100644
+index b48dd4f..9f9a72f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -11,18 +11,46 @@
@@ -2718,7 +2708,7 @@ index 4e7f40c..0f9ee2c 100644
.align 5
/*
* This is the fast syscall return path. We do as little as
-@@ -173,6 +201,12 @@ ENTRY(vector_swi)
+@@ -174,6 +202,12 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
@@ -2832,10 +2822,10 @@ index 059c3da..8e45cfc 100644
flush_icache_range((unsigned long)base + offset, offset +
length);
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
-index 3637973..cb29657 100644
+index 29e2991..7bc5757 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
-@@ -444,7 +444,7 @@ __enable_mmu:
+@@ -467,7 +467,7 @@ __enable_mmu:
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
@@ -2845,19 +2835,29 @@ index 3637973..cb29657 100644
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
-index af791f4..3ff9821 100644
+index efdddcb..35e58f6 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
-@@ -38,12 +38,39 @@
+@@ -38,17 +38,47 @@
#endif
#ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
{
-+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
+- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ void *p;
++
++ if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
+ return NULL;
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++
++ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+ return p;
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
__builtin_return_address(0));
@@ -3079,7 +3079,7 @@ index ef9119f..31995a3 100644
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
if (secure_computing() == -1)
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
-index 1a4d232..2677169 100644
+index 3826935..8ed63ed 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -122,6 +122,7 @@ void machine_power_off(void)
@@ -3091,10 +3091,10 @@ index 1a4d232..2677169 100644
/*
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index 6c777e9..3d2d0ca 100644
+index 36c18b7..0d78292 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
-@@ -105,21 +105,23 @@ EXPORT_SYMBOL(elf_hwcap);
+@@ -108,21 +108,23 @@ EXPORT_SYMBOL(elf_hwcap);
unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);
@@ -3123,7 +3123,7 @@ index 6c777e9..3d2d0ca 100644
EXPORT_SYMBOL(outer_cache);
#endif
-@@ -250,9 +252,13 @@ static int __get_cpu_architecture(void)
+@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
* Register 0 and check for VMSAv7 or PMSAv7 */
unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
@@ -3197,7 +3197,7 @@ index 423663e..bfeb0ff 100644
- return page;
-}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
-index f11d825..bbe686f 100644
+index 3d6b782..8b3baeb 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -76,7 +76,7 @@ enum ipi_msg_type {
@@ -3210,10 +3210,10 @@ index f11d825..bbe686f 100644
void __init smp_set_ops(struct smp_operations *ops)
{
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
-index 7a3be1d..b00c7de 100644
+index b10e136..cb5edf9 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
-@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = {
+@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
.virtual = ITCM_OFFSET,
.pfn = __phys_to_pfn(ITCM_OFFSET),
.length = 0,
@@ -3222,7 +3222,7 @@ index 7a3be1d..b00c7de 100644
}
};
-@@ -267,7 +267,9 @@ no_dtcm:
+@@ -362,7 +362,9 @@ no_dtcm:
start = &__sitcm_text;
end = &__eitcm_text;
ram = &__itcm_start;
@@ -3233,7 +3233,7 @@ index 7a3be1d..b00c7de 100644
start, end);
itcm_present = true;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
-index 3dce1a3..60e857f 100644
+index d358226..bfd4019 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
@@ -3264,7 +3264,7 @@ index 3dce1a3..60e857f 100644
if (signr)
do_exit(signr);
}
-@@ -878,7 +883,11 @@ void __init early_trap_init(void *vectors_base)
+@@ -870,7 +875,11 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
@@ -3309,7 +3309,7 @@ index 8b60fde..8d986dd 100644
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
-index d9631ec..b0c966c 100644
+index f9c341c..7430436 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
@@ -3321,7 +3321,7 @@ index d9631ec..b0c966c 100644
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
-@@ -373,7 +373,7 @@ void force_vm_exit(const cpumask_t *mask)
+@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
@@ -3330,7 +3330,7 @@ index d9631ec..b0c966c 100644
}
/**
-@@ -406,7 +406,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
/* First user of a new VMID generation? */
if (unlikely(kvm_next_vmid == 0)) {
@@ -3339,7 +3339,7 @@ index d9631ec..b0c966c 100644
kvm_next_vmid = 1;
/*
-@@ -423,7 +423,7 @@ static void update_vttbr(struct kvm *kvm)
+@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
@@ -3348,7 +3348,7 @@ index d9631ec..b0c966c 100644
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
-@@ -1098,7 +1098,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+@@ -1110,7 +1110,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
@@ -3482,9 +3482,18 @@ index 8044591..c9b2609 100644
.const_udelay = __loop_const_udelay,
.udelay = __loop_udelay,
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
-index 3e58d71..029817c 100644
+index 4b39af2..9ae747d 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -85,7 +85,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+ return 1;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(3)
+ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ {
+ int atomic;
@@ -136,7 +136,7 @@ out:
}
@@ -3494,6 +3503,15 @@ index 3e58d71..029817c 100644
{
/*
* This test is stubbed out of the main function above to keep
+@@ -150,7 +150,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ return __copy_to_user_memcpy(to, from, n);
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(2)
+ __clear_user_memset(void __user *addr, unsigned long n)
+ {
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -190,7 +190,7 @@ out:
return n;
}
@@ -3504,10 +3522,10 @@ index 3e58d71..029817c 100644
/* See rational for this in __copy_to_user() above. */
if (n < 64)
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
-index 7d23ce0..5ef383a 100644
+index f572219..2cf36d5 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
-@@ -738,8 +738,10 @@ void __init exynos_pm_init(void)
+@@ -732,8 +732,10 @@ void __init exynos_pm_init(void)
tmp |= pm_data->wake_disable_mask;
pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
@@ -3520,19 +3538,6 @@ index 7d23ce0..5ef383a 100644
register_syscore_ops(&exynos_pm_syscore_ops);
suspend_set_ops(&exynos_suspend_ops);
-diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
-index 0662087..004d163 100644
---- a/arch/arm/mach-keystone/keystone.c
-+++ b/arch/arm/mach-keystone/keystone.c
-@@ -27,7 +27,7 @@
-
- #include "keystone.h"
-
--static struct notifier_block platform_nb;
-+static notifier_block_no_const platform_nb;
- static unsigned long keystone_dma_pfn_offset __read_mostly;
-
- static int keystone_platform_notifier(struct notifier_block *nb,
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index e46e9ea..9141c83 100644
--- a/arch/arm/mach-mvebu/coherency.c
@@ -3603,7 +3608,7 @@ index 5305ec7..6d74045 100644
#include <asm/smp_scu.h>
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
-index 6833df4..3e059b2 100644
+index e1d2e99..d9b3177 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
@@ -3616,10 +3621,10 @@ index 6833df4..3e059b2 100644
};
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
-index 166b18f..f985f04 100644
+index 4cb8fd9..5ce65bc 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
-@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od)
+@@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
struct platform_device __init *omap_device_build(const char *pdev_name,
int pdev_id,
struct omap_hwmod *oh,
@@ -3628,7 +3633,7 @@ index 166b18f..f985f04 100644
{
struct omap_hwmod *ohs[] = { oh };
-@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+@@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
struct platform_device __init *omap_device_build_ss(const char *pdev_name,
int pdev_id,
struct omap_hwmod **ohs,
@@ -3657,7 +3662,7 @@ index 78c02b3..c94109a 100644
struct omap_device *omap_device_alloc(struct platform_device *pdev,
struct omap_hwmod **ohs, int oh_cnt);
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 5286e77..fdd234c 100644
+index 486cc4d..8d1a0b7 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
@@ -3720,6 +3725,74 @@ index ff0a68c..b312aa0 100644
pdev = omap_device_build(dev_name, id, oh, &pdata,
sizeof(struct omap_wd_timer_platform_data));
WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index b0790fc..71eb21f 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -22,6 +22,7 @@
+ #include <asm/proc-fns.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+ #include "platsmp-apmu.h"
+
+@@ -233,6 +234,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+
+ void __init shmobile_smp_apmu_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_close_kernel();
+ }
+ #endif
+diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
+index 34608fc..344d7c0 100644
+--- a/arch/arm/mach-shmobile/pm-r8a7740.c
++++ b/arch/arm/mach-shmobile/pm-r8a7740.c
+@@ -11,6 +11,7 @@
+ #include <linux/console.h>
+ #include <linux/io.h>
+ #include <linux/suspend.h>
++#include <asm/pgtable.h>
+
+ #include "common.h"
+ #include "pm-rmobile.h"
+@@ -117,7 +118,9 @@ static int r8a7740_enter_suspend(suspend_state_t suspend_state)
+
+ static void r8a7740_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = r8a7740_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = r8a7740_enter_suspend;
++ pax_close_kernel();
+ }
+ #else
+ static void r8a7740_suspend_init(void) {}
+diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
+index a7e4668..83334f33 100644
+--- a/arch/arm/mach-shmobile/pm-sh73a0.c
++++ b/arch/arm/mach-shmobile/pm-sh73a0.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+
+ #ifdef CONFIG_SUSPEND
+@@ -20,7 +21,9 @@ static int sh73a0_enter_suspend(suspend_state_t suspend_state)
+
+ static void sh73a0_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = sh73a0_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = sh73a0_enter_suspend;
++ pax_close_kernel();
+ }
+ #else
+ static void sh73a0_suspend_init(void) {}
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 7469347..1ecc350 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -3746,7 +3819,7 @@ index 3b9098d..15b390f 100644
#include <linux/irq.h>
#include <linux/kernel.h>
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
-index 2cb587b..6ddfebf 100644
+index 8538910..2f39bc4 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -10,6 +10,7 @@
@@ -3757,24 +3830,6 @@ index 2cb587b..6ddfebf 100644
#include <linux/irqchip/arm-gic.h>
#include <linux/delay.h>
#include <linux/io.h>
-diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
-index 2dea8b5..6499da2 100644
---- a/arch/arm/mach-ux500/setup.h
-+++ b/arch/arm/mach-ux500/setup.h
-@@ -33,13 +33,6 @@ extern void ux500_timer_init(void);
- .type = MT_DEVICE, \
- }
-
--#define __MEM_DEV_DESC(x, sz) { \
-- .virtual = IO_ADDRESS(x), \
-- .pfn = __phys_to_pfn(x), \
-- .length = sz, \
-- .type = MT_MEMORY_RWX, \
--}
--
- extern struct smp_operations ux500_smp_ops;
- extern void ux500_cpu_die(unsigned int cpu);
-
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index f66816c..228b951 100644
--- a/arch/arm/mach-zynq/platsmp.c
@@ -3788,7 +3843,7 @@ index f66816c..228b951 100644
#include "common.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
-index b4f92b9..ffefea9 100644
+index 7c6b976..055db09 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -446,6 +446,7 @@ config CPU_32v5
@@ -3807,7 +3862,7 @@ index b4f92b9..ffefea9 100644
help
This option enables or disables the use of domain switching
via the set_fs() function.
-@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS
+@@ -818,7 +820,7 @@ config NEED_KUSER_HELPERS
config KUSER_HELPERS
bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
@@ -3816,7 +3871,7 @@ index b4f92b9..ffefea9 100644
default y
help
Warning: disabling this option may break user programs.
-@@ -812,7 +814,7 @@ config KUSER_HELPERS
+@@ -832,7 +834,7 @@ config KUSER_HELPERS
See Documentation/arm/kernel_user_helpers.txt for details.
However, the fixed address nature of these helpers can be used
@@ -3891,12 +3946,12 @@ index 9769f1e..16aaa55 100644
goto fault; \
} while (0)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
-index e309c8f..f8965e8 100644
+index 71b3d33..8af9ade 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
-@@ -43,7 +43,7 @@ struct l2c_init_data {
- void (*save)(void __iomem *);
+@@ -44,7 +44,7 @@ struct l2c_init_data {
void (*configure)(void __iomem *);
+ void (*unlock)(void __iomem *, unsigned);
struct outer_cache_fns outer_cache;
-};
+} __do_const;
@@ -3952,7 +4007,7 @@ index 845769e..4278fd7 100644
atomic64_set(&mm->context.id, asid);
}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
-index 6333d9c..3bb19f2 100644
+index 0d629b8..01867c8 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,7 @@
@@ -4202,10 +4257,10 @@ index cf08bdf..772656c 100644
unsigned long search_exception_table(unsigned long addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
-index be92fa0..5252d7e 100644
+index 8a63b4c..6b04370 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
-@@ -709,7 +709,46 @@ void free_tcmmem(void)
+@@ -710,7 +710,46 @@ void free_tcmmem(void)
{
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -4253,10 +4308,10 @@ index be92fa0..5252d7e 100644
free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
-index d1e5ad7..84dcbf2 100644
+index 0c81056..97279f7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
-@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+@@ -405,9 +405,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
unsigned int mtype;
if (cached)
@@ -4378,7 +4433,7 @@ index 407dc78..047ce9d 100644
}
}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
-index 7186382..0c145cf 100644
+index 870838a..070df1d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -41,6 +41,22 @@
@@ -4625,7 +4680,7 @@ index 7186382..0c145cf 100644
md->virtual >= PAGE_OFFSET &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
-@@ -1218,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
+@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
* called function. This means you can't use any function or debugging
* method which may touch any device, otherwise the kernel _will_ crash.
*/
@@ -4648,7 +4703,7 @@ index 7186382..0c145cf 100644
for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
-@@ -1242,7 +1296,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
@@ -4657,7 +4712,7 @@ index 7186382..0c145cf 100644
create_mapping(&map);
#endif
-@@ -1253,14 +1307,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
@@ -4674,7 +4729,7 @@ index 7186382..0c145cf 100644
create_mapping(&map);
#endif
-@@ -1269,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
@@ -4683,7 +4738,7 @@ index 7186382..0c145cf 100644
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
-@@ -1329,8 +1383,10 @@ static void __init kmap_init(void)
+@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
@@ -4694,7 +4749,7 @@ index 7186382..0c145cf 100644
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
-@@ -1343,11 +1399,48 @@ static void __init map_lowmem(void)
+@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
if (start >= end)
break;
@@ -4744,7 +4799,7 @@ index 7186382..0c145cf 100644
create_mapping(&map);
} else if (start >= kernel_x_end) {
-@@ -1371,7 +1464,7 @@ static void __init map_lowmem(void)
+@@ -1377,7 +1470,7 @@ static void __init map_lowmem(void)
map.pfn = __phys_to_pfn(kernel_x_start);
map.virtual = __phys_to_virt(kernel_x_start);
map.length = kernel_x_end - kernel_x_start;
@@ -4753,7 +4808,7 @@ index 7186382..0c145cf 100644
create_mapping(&map);
-@@ -1384,6 +1477,7 @@ static void __init map_lowmem(void)
+@@ -1390,6 +1483,7 @@ static void __init map_lowmem(void)
create_mapping(&map);
}
}
@@ -4762,7 +4817,7 @@ index 7186382..0c145cf 100644
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
-index e0e2358..96c6791 100644
+index c011e22..92a0260 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -20,6 +20,7 @@
@@ -4773,7 +4828,7 @@ index e0e2358..96c6791 100644
#include "bpf_jit_32.h"
-@@ -72,7 +73,11 @@ struct jit_ctx {
+@@ -72,54 +73,38 @@ struct jit_ctx {
#endif
};
@@ -4783,9 +4838,62 @@ index e0e2358..96c6791 100644
int bpf_jit_enable __read_mostly;
+#endif
- static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+- unsigned int size)
+-{
+- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+-
+- if (!ptr)
+- return -EFAULT;
+- memcpy(ret, ptr, size);
+- return 0;
+-}
+-
+-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
-@@ -179,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ u8 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 1);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+ {
+ u16 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 2);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+ {
+ u32 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 4);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -199,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
{
u32 *ptr;
/* We are guaranteed to have aligned memory. */
@@ -4796,30 +4904,35 @@ index e0e2358..96c6791 100644
}
static void build_prologue(struct jit_ctx *ctx)
-@@ -547,7 +554,7 @@ load_common:
- emit(ARM_SUB_I(r_scratch, r_skb_hl,
- 1 << load_order), ctx);
- emit(ARM_CMP_R(r_scratch, r_off), ctx);
-- condt = ARM_COND_HS;
-+ condt = ARM_COND_GE;
- } else {
- emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
+@@ -556,6 +543,9 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_LD | BPF_B | BPF_ABS:
+ load_order = 0;
+ load:
++ /* the interpreter will deal with the negative K */
++ if ((int)k < 0)
++ return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -570,18 +560,6 @@ load_common:
condt = ARM_COND_HI;
-@@ -860,9 +867,11 @@ b_epilogue:
- off = offsetof(struct sk_buff, vlan_tci);
- emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-- OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-- else
-- OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
-+ OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
-+ else {
-+ OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
-+ OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
-+ }
- break;
- case BPF_ANC | SKF_AD_QUEUE:
- ctx->seen |= SEEN_SKB;
+ }
+
+- /*
+- * test for negative offset, only if we are
+- * currently scheduled to take the fast
+- * path. this will update the flags so that
+- * the slowpath instruction are ignored if the
+- * offset is negative.
+- *
+- * for loard_order == 0 the HI condition will
+- * make loads at offset 0 take the slow path too.
+- */
+- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+-
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
index 5b217f4..c23f40e 100644
--- a/arch/arm/plat-iop/setup.c
@@ -4867,7 +4980,7 @@ index 7047051..44e8675 100644
#endif
#endif
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
-index 71f19c4..2b13cfe 100644
+index 0fa47c4..b167938 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,7 +44,7 @@
@@ -4933,7 +5046,7 @@ index 07e1ba44..ec8cbbb 100644
#define user_addr_max get_fs
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
-index b0bd4e5..54e82f6 100644
+index d16a1ce..a5acc60 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
@@ -4998,7 +5111,7 @@ index 479330b..53717a8 100644
#endif /* __ASM_AVR32_KMAP_TYPES_H */
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
-index d223a8b..69c5210 100644
+index c035339..e1fa594 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -5195,10 +5308,10 @@ index 69952c18..4fa2908 100644
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
-index 76d25b2..d3793a0f 100644
+index 42a91a7..29d446e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
-@@ -541,6 +541,7 @@ source "drivers/sn/Kconfig"
+@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
config KEXEC
bool "kexec system call"
depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
@@ -5237,7 +5350,7 @@ index 0bf0350..2ad1957 100644
+
#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
-index f6769eb..1cdb590 100644
+index 843ba43..fa118fb 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,7 +66,7 @@
@@ -5433,10 +5546,10 @@ index 4f3fb6cc..254055e 100644
})
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
-index 29754aa..06d2838 100644
+index b15933c..098b1c8 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
-@@ -492,15 +492,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+@@ -484,15 +484,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
}
static inline int
@@ -5478,7 +5591,7 @@ index 29754aa..06d2838 100644
}
static inline int
-@@ -683,7 +707,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+@@ -675,7 +699,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
break;
case RV_BDREL:
@@ -5494,7 +5607,7 @@ index 29754aa..06d2838 100644
break;
case RV_LTV:
-@@ -818,15 +849,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+@@ -810,15 +841,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
* addresses have been selected...
*/
uint64_t gp;
@@ -5562,10 +5675,10 @@ index 41e33f8..65180b2a 100644
}
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
-index 84f8a52..7c76178 100644
+index dc506b0..39baade 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
-@@ -192,7 +192,7 @@ SECTIONS {
+@@ -171,7 +171,7 @@ SECTIONS {
/* Per-cpu data: */
. = ALIGN(PERCPU_PAGE_SIZE);
PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
@@ -5575,7 +5688,7 @@ index 84f8a52..7c76178 100644
* ensure percpu data fits
* into percpu page size
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
-index ba5ba7a..36e9d3a 100644
+index 70b40d1..01a9a28 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
@@ -5626,10 +5739,10 @@ index ba5ba7a..36e9d3a 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
-index 52b7604b..455cb85 100644
+index f50d4b3..c7975ee 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
-@@ -143,6 +143,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
unsigned long pgoff, unsigned long flags)
{
struct vm_unmapped_area_info info;
@@ -5637,7 +5750,7 @@ index 52b7604b..455cb85 100644
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
-@@ -166,6 +167,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
info.align_offset = 0;
@@ -5646,10 +5759,10 @@ index 52b7604b..455cb85 100644
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
-index a9b65cf..49ae1cf 100644
+index 97e48b0..fc59c36 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
-@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
+@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -5669,7 +5782,7 @@ index a9b65cf..49ae1cf 100644
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
-@@ -286,7 +299,7 @@ static int __init gate_vma_init(void)
+@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -5736,7 +5849,7 @@ index 0395c51..5f26031 100644
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
-index d703d8e..a8e2d70 100644
+index 5a696e5..070490d 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,7 +90,7 @@ static inline void fence(void)
@@ -5749,10 +5862,10 @@ index d703d8e..a8e2d70 100644
#define smp_load_acquire(p) \
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
-index 7ca80ac..794ba72 100644
+index 53f0f6c..2dc07fd 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
-@@ -194,6 +194,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & HUGEPT_MASK;
info.align_offset = 0;
@@ -5779,10 +5892,10 @@ index 4efe96a..60e8699 100644
#define SMP_CACHE_BYTES L1_CACHE_BYTES
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
-index a3b1ffe..7d61ca6 100644
+index 199a835..822b487 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2586,6 +2586,7 @@ source "kernel/Kconfig.preempt"
+@@ -2591,6 +2591,7 @@ source "kernel/Kconfig.preempt"
config KEXEC
bool "Kexec system call"
@@ -6409,7 +6522,7 @@ index 26d4363..3c9a82e 100644
/*
* atomic64_add_negative - add and test if negative
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
-index 2b8bbbc..4556df6 100644
+index 7ecba84..21774af 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,7 +133,7 @@
@@ -6602,7 +6715,7 @@ index b336037..5b874cc 100644
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 70f6e7f..11f4ada 100644
+index ae85694..4cdbba8 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,9 @@
@@ -6658,10 +6771,10 @@ index 9c0014e..5101ef5 100644
/*
* We stash processor id into a COP0 register to retrieve it fast
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
-index bf8b324..cec5705 100644
+index 5305d69..1da2bf5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
-@@ -130,6 +130,7 @@ extern u64 __ua_limit;
+@@ -146,6 +146,7 @@ static inline bool eva_kernel_access(void)
__ok == 0; \
})
@@ -6706,10 +6819,10 @@ index 9287678..f870e47 100644
#include <linux/module.h>
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
-index a74ec3a..4f06f18 100644
+index 74f6752..f3d7a47 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
-@@ -202,7 +202,7 @@ spurious_8259A_irq:
+@@ -205,7 +205,7 @@ spurious_8259A_irq:
printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
spurious_irq_mask |= irqmask;
}
@@ -6732,10 +6845,10 @@ index 44a1f79..2bd6aa3 100644
void __init gt641xx_irq_init(void)
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
-index 3c8a18a..b4929b6 100644
+index 8eb5af8..2baf465 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
-@@ -76,17 +76,17 @@ void ack_bad_irq(unsigned int irq)
+@@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
printk("unexpected IRQ # %d\n", irq);
}
@@ -6756,7 +6869,7 @@ index 3c8a18a..b4929b6 100644
}
void __init init_IRQ(void)
-@@ -110,6 +110,8 @@ void __init init_IRQ(void)
+@@ -58,6 +58,8 @@ void __init init_IRQ(void)
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -6765,7 +6878,7 @@ index 3c8a18a..b4929b6 100644
static inline void check_stack_overflow(void)
{
unsigned long sp;
-@@ -125,6 +127,7 @@ static inline void check_stack_overflow(void)
+@@ -73,6 +75,7 @@ static inline void check_stack_overflow(void)
printk("do_IRQ: stack overflow: %ld\n",
sp - sizeof(struct thread_info));
dump_stack();
@@ -6906,10 +7019,10 @@ index 2242bdd..b284048 100644
}
/* Arrange for an interrupt in a short while */
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
-index 5f5f44e..cf10625 100644
+index 8ea28e6..c8873d5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -696,7 +696,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+@@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
siginfo_t info;
prev_state = exception_enter();
@@ -6930,10 +7043,10 @@ index 5f5f44e..cf10625 100644
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index 52f205a..335927c 100644
+index cd4c129..290c518 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
-@@ -1013,7 +1013,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+@@ -1016,7 +1016,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return r;
}
@@ -6943,7 +7056,7 @@ index 52f205a..335927c 100644
if (kvm_mips_callbacks) {
kvm_err("kvm: module already exists\n");
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
-index 7ff8637..6004edb 100644
+index 852a41c..75b9d38 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -31,6 +31,23 @@
@@ -6970,7 +7083,7 @@ index 7ff8637..6004edb 100644
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
-@@ -206,6 +223,14 @@ bad_area:
+@@ -207,6 +224,14 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
@@ -7527,7 +7640,7 @@ index 5aba01a..47cdd5a 100644
mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
-index 7f67c4c..d85c11d 100644
+index b99b39f..e3915ae 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
@@ -7542,17 +7655,17 @@ index 7f67c4c..d85c11d 100644
fault_space = regs->iasq[0];
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
-index e5120e6..8ddb5cc 100644
+index 15503ad..4b1b8b6 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
-@@ -15,6 +15,7 @@
- #include <linux/sched.h>
+@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
+ #include <linux/uaccess.h>
+#include <linux/unistd.h>
- #include <asm/uaccess.h>
#include <asm/traps.h>
+
@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
static unsigned long
parisc_acctyp(unsigned long code, unsigned int inst)
@@ -7715,10 +7828,10 @@ index e5120e6..8ddb5cc 100644
/*
* If for any reason at all we couldn't handle the fault, make
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
-index 190cc48..48439ce 100644
+index 5ef2711..21be2c3 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -413,6 +413,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
+@@ -415,6 +415,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config KEXEC
bool "kexec system call"
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
@@ -8240,7 +8353,7 @@ index 512d278..d31fadd 100644
2:"
: "=&r" (t)
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
-index a3bf5be..e03ba81 100644
+index 51ccc72..35de789 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,7 +76,7 @@
@@ -8414,7 +8527,7 @@ index 8565c25..2865190 100644
return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
-index 69c0598..2c56964 100644
+index 71294a6..9e40aca 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
@@ -8638,7 +8751,7 @@ index 7efee4a..48d47cc 100644
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
-index a0c071d..49cdc7f 100644
+index 2a8ebae..5643c6f 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -58,6 +58,7 @@
@@ -8818,10 +8931,10 @@ index a0c071d..49cdc7f 100644
static inline unsigned long clear_user(void __user *addr, unsigned long size)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
-index c1ebbda..fd8a98d 100644
+index 12868b1..5155667 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
-@@ -15,6 +15,11 @@ CFLAGS_prom_init.o += -fPIC
+@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
@@ -8833,7 +8946,7 @@ index c1ebbda..fd8a98d 100644
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
-@@ -27,6 +32,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
+@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
endif
@@ -8865,10 +8978,10 @@ index 3e68d1c..72a5ee6 100644
ld r4,_DAR(r1)
bl bad_page_fault
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
-index 9519e6b..13f6c38 100644
+index 0a0399c2..262a2e6 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
-@@ -1599,10 +1599,10 @@ handle_page_fault:
+@@ -1591,10 +1591,10 @@ handle_page_fault:
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -8944,10 +9057,10 @@ index c94d2e0..992a9ce 100644
sechdrs, module);
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
-index febb50d..bb10020 100644
+index 64e6e9d..cf90ed5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
-@@ -1036,8 +1036,8 @@ void show_regs(struct pt_regs * regs)
+@@ -1033,8 +1033,8 @@ void show_regs(struct pt_regs * regs)
* Lookup NIP late so we have the best change of getting the
* above info out without failing
*/
@@ -8958,7 +9071,7 @@ index febb50d..bb10020 100644
#endif
show_stack(current, (unsigned long *) regs->gpr[1]);
if (!user_mode(regs))
-@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1550,10 +1550,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
@@ -8971,7 +9084,7 @@ index febb50d..bb10020 100644
(void *)current->ret_stack[curr_frame].ret);
curr_frame--;
}
-@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+@@ -1573,7 +1573,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
lr = regs->link;
@@ -8980,7 +9093,7 @@ index febb50d..bb10020 100644
regs->trap, (void *)regs->nip, (void *)lr);
firstframe = 1;
}
-@@ -1613,49 +1613,3 @@ void notrace __ppc64_runlatch_off(void)
+@@ -1609,49 +1609,3 @@ void notrace __ppc64_runlatch_off(void)
mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
@@ -9096,7 +9209,7 @@ index c7c24d2..1bf7039 100644
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
-index 19e4744..28a8d7b 100644
+index 37de90f..12472ac 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -36,6 +36,7 @@
@@ -9126,7 +9239,7 @@ index 19e4744..28a8d7b 100644
do_exit(signr);
}
-@@ -1137,6 +1143,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+@@ -1139,6 +1145,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
@@ -9154,7 +9267,7 @@ index 19e4744..28a8d7b 100644
* has no FPU, in that case the reason flags will be 0 */
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
-index 305eb0d..accc5b40 100644
+index b457bfa..9018cde 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -34,6 +34,7 @@
@@ -9165,7 +9278,7 @@ index 305eb0d..accc5b40 100644
#undef DEBUG
-@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -179,7 +180,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_base = VDSO32_MBASE;
#endif
@@ -9174,7 +9287,7 @@ index 305eb0d..accc5b40 100644
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
-@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+@@ -199,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vdso_base = get_unmapped_area(NULL, vdso_base,
(vdso_pages << PAGE_SHIFT) +
((VDSO_ALIGNMENT - 1) & PAGE_MASK),
@@ -9184,10 +9297,10 @@ index 305eb0d..accc5b40 100644
rc = vdso_base;
goto fail_mmapsem;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
-index ac3ddf1..9a54c76 100644
+index e5dde32..557af3d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
-@@ -1403,7 +1403,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
+@@ -1404,7 +1404,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
@@ -9232,13 +9345,13 @@ index 5eea6f3..5d10396 100644
EXPORT_SYMBOL(copy_in_user);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
-index b396868..3eb6b9f 100644
+index a67c6d7..a662e6d 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
-@@ -33,6 +33,10 @@
- #include <linux/ratelimit.h>
+@@ -34,6 +34,10 @@
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
+ #include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
@@ -9289,7 +9402,7 @@ index b396868..3eb6b9f 100644
else
is_write = error_code & DSISR_ISSTORE;
#else
-@@ -383,12 +414,16 @@ good_area:
+@@ -384,12 +415,16 @@ good_area:
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
@@ -9307,7 +9420,7 @@ index b396868..3eb6b9f 100644
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
-@@ -483,6 +518,23 @@ bad_area:
+@@ -484,6 +519,23 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
@@ -9442,7 +9555,7 @@ index adbe380..adb7516 100644
+
#endif /* __ARCH_S390_ATOMIC__ */
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
-index 8d72471..5322500 100644
+index e6f8615..4a66339 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,7 +42,7 @@
@@ -9501,7 +9614,7 @@ index c4a93d6..4d2a9b4 100644
#endif /* __ASM_EXEC_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index d64a7a6..0830329 100644
+index 9dd4cc4..36f4b84 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
@@ -9512,7 +9625,7 @@ index d64a7a6..0830329 100644
#define access_ok(type, addr, size) __access_ok(addr, size)
/*
-@@ -275,6 +276,10 @@ static inline unsigned long __must_check
+@@ -278,6 +279,10 @@ static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -9523,7 +9636,7 @@ index d64a7a6..0830329 100644
return __copy_to_user(to, from, n);
}
-@@ -303,10 +308,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+@@ -307,10 +312,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
@@ -10012,7 +10125,7 @@ index 4082749..fd97781 100644
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
-index 7664894..45a974b 100644
+index 809941e..b443309 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
@@ -10427,7 +10540,7 @@ index 64ee103..388aef0 100644
}
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index a35194b..47dabc0d 100644
+index ea6e9a2..5703598 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -10,6 +10,7 @@
@@ -10438,7 +10551,7 @@ index a35194b..47dabc0d 100644
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
-@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+@@ -76,6 +77,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
return 1;
}
@@ -10450,7 +10563,7 @@ index a35194b..47dabc0d 100644
static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
return 1;
-@@ -228,8 +234,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
+@@ -250,8 +256,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
@@ -10467,7 +10580,7 @@ index a35194b..47dabc0d 100644
if (unlikely(ret))
ret = copy_from_user_fixup(to, from, size);
-@@ -245,8 +258,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
+@@ -267,8 +280,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
@@ -11293,13 +11406,13 @@ index 30c3ecc..736f015 100644
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
-index 70d8171..274c6c0 100644
+index c399e7b..2387414 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
-@@ -21,6 +21,9 @@
- #include <linux/perf_event.h>
+@@ -22,6 +22,9 @@
#include <linux/interrupt.h>
#include <linux/kdebug.h>
+ #include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
@@ -11610,13 +11723,13 @@ index 70d8171..274c6c0 100644
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
-index 4798232..f76e3aa 100644
+index dbabe57..d34d315 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
-@@ -22,6 +22,9 @@
- #include <linux/kdebug.h>
+@@ -23,6 +23,9 @@
#include <linux/percpu.h>
#include <linux/context_tracking.h>
+ #include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/compiler.h>
@@ -12130,7 +12243,7 @@ index 4798232..f76e3aa 100644
* load/store/atomic was a write or not, it only says that there
* was no match. So in such a case we (carefully) read the
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
-index 4242eab..9ae6360 100644
+index 131eaf4..285ea31 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
@@ -12236,7 +12349,7 @@ index 4242eab..9ae6360 100644
pte_t *huge_pte_alloc(struct mm_struct *mm,
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
-index 559cb74..9e5f097 100644
+index 4ac88b7..bac6cb2 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -187,9 +187,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
@@ -12274,10 +12387,10 @@ index 559cb74..9e5f097 100644
#endif /* CONFIG_DEBUG_DCFLUSH */
}
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
-index a07e31b..85c9003 100644
+index 9def1f5..cf0cabc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
-@@ -198,6 +198,7 @@ source "kernel/Kconfig.hz"
+@@ -204,6 +204,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
@@ -12286,7 +12399,7 @@ index a07e31b..85c9003 100644
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
-index 7b11c5f..755a026 100644
+index 0496970..1a57e5f 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
@@ -12303,9 +12416,9 @@ index 7b11c5f..755a026 100644
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
- /* Define this to indicate that cmpxchg is an efficient operation. */
- #define __HAVE_ARCH_CMPXCHG
+ #endif /* !__ASSEMBLY__ */
+ #endif /* _ASM_TILE_ATOMIC_64_H */
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 6160761..00cac88 100644
--- a/arch/tile/include/asm/cache.h
@@ -12325,10 +12438,10 @@ index 6160761..00cac88 100644
/* bytes per L2 cache line */
#define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
-index f41cb53..31d3ab4 100644
+index 0a9c4265..bfb62d1 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
-@@ -417,9 +417,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+@@ -429,9 +429,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
const void __user *from,
unsigned long n)
{
@@ -12341,10 +12454,10 @@ index f41cb53..31d3ab4 100644
else
copy_from_user_overflow();
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
-index 8416240..a012fb7 100644
+index c034dc3..cf1cc96 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
-@@ -179,6 +179,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+@@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -12352,7 +12465,7 @@ index 8416240..a012fb7 100644
return vm_unmapped_area(&info);
}
-@@ -196,6 +197,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+@@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
info.high_limit = current->mm->mmap_base;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -12361,12 +12474,12 @@ index 8416240..a012fb7 100644
/*
diff --git a/arch/um/Makefile b/arch/um/Makefile
-index 17d4460..9d74338e3de4 100644
+index 098ab33..fc54a33 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
-@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
- $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
- $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
+@@ -73,6 +73,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
+ -D_FILE_OFFSET_BITS=64 -idirafter include \
+ -D__KERNEL__ -D__UM_HOST__
+ifdef CONSTIFY_PLUGIN
+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
@@ -12478,28 +12591,34 @@ index ad8f795..2c7eec6 100644
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
-index 226d569..297bf74 100644
+index b3a1a5d..8dbc2d6 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -32,7 +32,7 @@ config X86
- select HAVE_AOUT if X86_32
- select HAVE_UNSTABLE_SCHED_CLOCK
- select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
-- select ARCH_SUPPORTS_INT128 if X86_64
-+ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW
- select HAVE_IDE
- select HAVE_OPROFILE
- select HAVE_PCSPKR_PLATFORM
-@@ -134,7 +134,7 @@ config X86
- select RTC_LIB
- select HAVE_DEBUG_STACKOVERFLOW
- select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
-- select HAVE_CC_STACKPROTECTOR
-+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
- select GENERIC_CPU_AUTOPROBE
- select HAVE_ARCH_AUDITSYSCALL
+@@ -35,13 +35,12 @@ config X86
+ select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
-@@ -266,7 +266,7 @@ config X86_HT
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+- select ARCH_SUPPORTS_INT128 if X86_64
++ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW
+ select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if X86_64
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+@@ -85,7 +84,7 @@ config X86
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_BPF_JIT if X86_64
+- select HAVE_CC_STACKPROTECTOR
++ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
+ select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
+ select HAVE_CONTEXT_TRACKING if X86_64
+@@ -274,7 +273,7 @@ config X86_64_SMP
config X86_32_LAZY_GS
def_bool y
@@ -12508,7 +12627,7 @@ index 226d569..297bf74 100644
config ARCH_HWEIGHT_CFLAGS
string
-@@ -638,6 +638,7 @@ config SCHED_OMIT_FRAME_POINTER
+@@ -646,6 +645,7 @@ config SCHED_OMIT_FRAME_POINTER
menuconfig HYPERVISOR_GUEST
bool "Linux guest support"
@@ -12516,7 +12635,7 @@ index 226d569..297bf74 100644
---help---
Say Y here to enable options for running Linux under various hyper-
visors. This option enables basic hypervisor detection and platform
-@@ -1005,6 +1006,7 @@ config VM86
+@@ -1014,6 +1014,7 @@ config VM86
config X86_16BIT
bool "Enable support for 16-bit segments" if EXPERT
@@ -12524,7 +12643,7 @@ index 226d569..297bf74 100644
default y
---help---
This option is required by programs like Wine to run 16-bit
-@@ -1178,6 +1180,7 @@ choice
+@@ -1182,6 +1183,7 @@ choice
config NOHIGHMEM
bool "off"
@@ -12532,7 +12651,7 @@ index 226d569..297bf74 100644
---help---
Linux can use up to 64 Gigabytes of physical memory on x86 systems.
However, the address space of 32-bit x86 processors is only 4
-@@ -1214,6 +1217,7 @@ config NOHIGHMEM
+@@ -1218,6 +1220,7 @@ config NOHIGHMEM
config HIGHMEM4G
bool "4GB"
@@ -12540,7 +12659,7 @@ index 226d569..297bf74 100644
---help---
Select this if you have a 32-bit processor and between 1 and 4
gigabytes of physical RAM.
-@@ -1266,7 +1270,7 @@ config PAGE_OFFSET
+@@ -1270,7 +1273,7 @@ config PAGE_OFFSET
hex
default 0xB0000000 if VMSPLIT_3G_OPT
default 0x80000000 if VMSPLIT_2G
@@ -12549,7 +12668,7 @@ index 226d569..297bf74 100644
default 0x40000000 if VMSPLIT_1G
default 0xC0000000
depends on X86_32
-@@ -1286,7 +1290,6 @@ config X86_PAE
+@@ -1290,7 +1293,6 @@ config X86_PAE
config ARCH_PHYS_ADDR_T_64BIT
def_bool y
@@ -12557,7 +12676,7 @@ index 226d569..297bf74 100644
config ARCH_DMA_ADDR_T_64BIT
def_bool y
-@@ -1717,6 +1720,7 @@ source kernel/Kconfig.hz
+@@ -1724,6 +1726,7 @@ source kernel/Kconfig.hz
config KEXEC
bool "kexec system call"
@@ -12565,7 +12684,7 @@ index 226d569..297bf74 100644
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
-@@ -1899,7 +1903,9 @@ config X86_NEED_RELOCS
+@@ -1906,7 +1909,9 @@ config X86_NEED_RELOCS
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
@@ -12576,7 +12695,7 @@ index 226d569..297bf74 100644
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
-@@ -1982,6 +1988,7 @@ config COMPAT_VDSO
+@@ -1989,6 +1994,7 @@ config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
depends on X86_32 || IA32_EMULATION
@@ -12584,7 +12703,7 @@ index 226d569..297bf74 100644
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
-@@ -2046,6 +2053,22 @@ config CMDLINE_OVERRIDE
+@@ -2053,6 +2059,22 @@ config CMDLINE_OVERRIDE
This is used to work around broken boot loaders. This should
be set to 'N' under normal conditions.
@@ -12639,7 +12758,7 @@ index 6983314..54ad7e8 100644
config X86_MINIMUM_CPU_FAMILY
int
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
-index 72484a6..83a4411 100644
+index d8c0d32..4ea2bb0 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -89,7 +89,7 @@ config EFI_PGT_DUMP
@@ -12661,7 +12780,7 @@ index 72484a6..83a4411 100644
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
-index 2fda005..2c72d40 100644
+index 118e6de..e02efff 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
@@ -12674,7 +12793,7 @@ index 2fda005..2c72d40 100644
else
BITS := 64
UTS_MACHINE := x86_64
-@@ -107,6 +104,9 @@ else
+@@ -116,6 +113,9 @@ else
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
endif
@@ -12684,15 +12803,15 @@ index 2fda005..2c72d40 100644
# Make sure compiler does not have buggy stack-protector support.
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-@@ -181,6 +181,7 @@ archheaders:
- $(Q)$(MAKE) $(build)=arch/x86/syscalls all
+@@ -184,6 +184,7 @@ archheaders:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
archprepare:
+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
ifeq ($(CONFIG_KEXEC_FILE),y)
$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
endif
-@@ -264,3 +265,9 @@ define archhelp
+@@ -267,3 +268,9 @@ define archhelp
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
endef
@@ -12881,7 +13000,7 @@ index b0c0d16..3b44ff8 100644
.quad 0x0000000000000000 /* TS continued */
gdt_end:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
-index a107b93..55602de 100644
+index e28437e..6a17460 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
@@ -14599,101 +14718,1912 @@ index a350c99..c1bac24 100644
+ pax_force_retaddr
ret
ENDPROC(twofish_dec_blk)
-diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
-index ae6aad1..719d6d9 100644
---- a/arch/x86/ia32/ia32_aout.c
-+++ b/arch/x86/ia32/ia32_aout.c
-@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
- unsigned long dump_start, dump_size;
- struct user32 dump;
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index f4e6308..7ba29a1 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -93,23 +93,26 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
-+ memset(&dump, 0, sizeof(dump));
+ .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12+\offset(%rsp)
++#endif
+ .if \r11
+- movq %r11, 6*8+\offset(%rsp)
++ movq %r11, R11+\offset(%rsp)
+ .endif
+ .if \r8910
+- movq %r10, 7*8+\offset(%rsp)
+- movq %r9, 8*8+\offset(%rsp)
+- movq %r8, 9*8+\offset(%rsp)
++ movq %r10, R10+\offset(%rsp)
++ movq %r9, R9+\offset(%rsp)
++ movq %r8, R8+\offset(%rsp)
+ .endif
+ .if \rax
+- movq %rax, 10*8+\offset(%rsp)
++ movq %rax, RAX+\offset(%rsp)
+ .endif
+ .if \rcx
+- movq %rcx, 11*8+\offset(%rsp)
++ movq %rcx, RCX+\offset(%rsp)
+ .endif
+- movq %rdx, 12*8+\offset(%rsp)
+- movq %rsi, 13*8+\offset(%rsp)
+- movq %rdi, 14*8+\offset(%rsp)
++ movq %rdx, RDX+\offset(%rsp)
++ movq %rsi, RSI+\offset(%rsp)
++ movq %rdi, RDI+\offset(%rsp)
+ .endm
+ .macro SAVE_C_REGS offset=0
+ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+@@ -128,76 +131,87 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
+
+ .macro SAVE_EXTRA_REGS offset=0
+- movq %r15, 0*8+\offset(%rsp)
+- movq %r14, 1*8+\offset(%rsp)
+- movq %r13, 2*8+\offset(%rsp)
+- movq %r12, 3*8+\offset(%rsp)
+- movq %rbp, 4*8+\offset(%rsp)
+- movq %rbx, 5*8+\offset(%rsp)
++ movq %r15, R15+\offset(%rsp)
++ movq %r14, R14+\offset(%rsp)
++ movq %r13, R13+\offset(%rsp)
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12+\offset(%rsp)
++#endif
++ movq %rbp, RBP+\offset(%rsp)
++ movq %rbx, RBX+\offset(%rsp)
+ .endm
+ .macro SAVE_EXTRA_REGS_RBP offset=0
+- movq %rbp, 4*8+\offset(%rsp)
++ movq %rbp, RBP+\offset(%rsp)
+ .endm
+
+ .macro RESTORE_EXTRA_REGS offset=0
+- movq 0*8+\offset(%rsp), %r15
+- movq 1*8+\offset(%rsp), %r14
+- movq 2*8+\offset(%rsp), %r13
+- movq 3*8+\offset(%rsp), %r12
+- movq 4*8+\offset(%rsp), %rbp
+- movq 5*8+\offset(%rsp), %rbx
++ movq R15+\offset(%rsp), %r15
++ movq R14+\offset(%rsp), %r14
++ movq R13+\offset(%rsp), %r13
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq R12+\offset(%rsp), %r12
++#endif
++ movq RBP+\offset(%rsp), %rbp
++ movq RBX+\offset(%rsp), %rbx
+ .endm
+
+ .macro ZERO_EXTRA_REGS
+ xorl %r15d, %r15d
+ xorl %r14d, %r14d
+ xorl %r13d, %r13d
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ xorl %r12d, %r12d
++#endif
+ xorl %ebp, %ebp
+ xorl %ebx, %ebx
+ .endm
+
+- .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
++ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .if \rstor_r12
++ movq R12(%rsp), %r12
++ .endif
++#endif
+ .if \rstor_r11
+- movq 6*8(%rsp), %r11
++ movq R11(%rsp), %r11
+ .endif
+ .if \rstor_r8910
+- movq 7*8(%rsp), %r10
+- movq 8*8(%rsp), %r9
+- movq 9*8(%rsp), %r8
++ movq R10(%rsp), %r10
++ movq R9(%rsp), %r9
++ movq R8(%rsp), %r8
+ .endif
+ .if \rstor_rax
+- movq 10*8(%rsp), %rax
++ movq RAX(%rsp), %rax
+ .endif
+ .if \rstor_rcx
+- movq 11*8(%rsp), %rcx
++ movq RCX(%rsp), %rcx
+ .endif
+ .if \rstor_rdx
+- movq 12*8(%rsp), %rdx
++ movq RDX(%rsp), %rdx
+ .endif
+- movq 13*8(%rsp), %rsi
+- movq 14*8(%rsp), %rdi
++ movq RSI(%rsp), %rsi
++ movq RDI(%rsp), %rdi
+ .endm
+ .macro RESTORE_C_REGS
+- RESTORE_C_REGS_HELPER 1,1,1,1,1
++ RESTORE_C_REGS_HELPER 1,1,1,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RAX
+- RESTORE_C_REGS_HELPER 0,1,1,1,1
++ RESTORE_C_REGS_HELPER 0,1,1,1,1,0
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX
+- RESTORE_C_REGS_HELPER 1,0,1,1,1
++ RESTORE_C_REGS_HELPER 1,0,1,1,1,0
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_R11
+- RESTORE_C_REGS_HELPER 1,1,0,1,1
++ RESTORE_C_REGS_HELPER 1,1,0,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+- RESTORE_C_REGS_HELPER 1,0,0,1,1
++ RESTORE_C_REGS_HELPER 1,0,0,1,1,1
+ .endm
+ .macro RESTORE_RSI_RDI
+- RESTORE_C_REGS_HELPER 0,0,0,0,0
++ RESTORE_C_REGS_HELPER 0,0,0,0,0,1
+ .endm
+ .macro RESTORE_RSI_RDI_RDX
+- RESTORE_C_REGS_HELPER 0,0,0,0,1
++ RESTORE_C_REGS_HELPER 0,0,0,0,1,1
+ .endm
+
+ .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 21dc60a..844def1 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -157,13 +157,154 @@
+ movl \reg, PT_GS(%esp)
+ .endm
+ .macro SET_KERNEL_GS reg
+
- fs = get_fs();
- set_fs(KERNEL_DS);
- has_dumped = 1;
-diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
-index c81d35e6..3500144 100644
---- a/arch/x86/ia32/ia32_signal.c
-+++ b/arch/x86/ia32/ia32_signal.c
-@@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
- if (__get_user(set.sig[0], &frame->sc.oldmask)
- || (_COMPAT_NSIG_WORDS > 1
- && __copy_from_user((((char *) &set.sig) + 4),
-- &frame->extramask,
-+ frame->extramask,
- sizeof(frame->extramask))))
- goto badframe;
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
-@@ -335,7 +335,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
- sp -= frame_size;
- /* Align the stack pointer according to the i386 ABI,
- * i.e. so that on function entry ((sp + 4) & 15) == 0. */
-- sp = ((sp + 4) & -16ul) - 4;
-+ sp = ((sp - 12) & -16ul) - 4;
- return (void __user *) sp;
- }
+ #endif /* CONFIG_X86_32_LAZY_GS */
-@@ -380,10 +380,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
- } else {
- /* Return stub is in 32bit vsyscall page */
- if (current->mm->context.vdso)
-- restorer = current->mm->context.vdso +
-- selected_vdso32->sym___kernel_sigreturn;
-+ restorer = (void __force_user *)(current->mm->context.vdso +
-+ selected_vdso32->sym___kernel_sigreturn);
- else
-- restorer = &frame->retcode;
-+ restorer = frame->retcode;
- }
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $X86_CR0_WP_BIT, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $X86_CR0_WP_BIT, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %ecx
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ or $2*4, %edi
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %ecx
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -176,7 +317,7 @@
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -184,6 +325,15 @@
+ SET_KERNEL_GS %edx
+ .endm
- put_user_try {
-@@ -393,7 +393,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
- * These are actually not used anymore, but left because some
- * gdb versions depend on them as a marker.
- */
-- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ popl %ecx
+@@ -222,7 +372,7 @@ ENTRY(ret_from_fork)
+ pushl $0x0202 # Reset kernel eflags
+ popfl
+ jmp syscall_exit
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
- if (err)
-@@ -435,7 +435,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
- 0xb8,
- __NR_ia32_rt_sigreturn,
- 0x80cd,
-- 0,
-+ 0
- };
+ ENTRY(ret_from_kernel_thread)
+ pushl %eax
+@@ -262,7 +412,15 @@ ret_from_intr:
+ andl $SEGMENT_RPL_MASK, %eax
+ #endif
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ pax_exit_kernel
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
- frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
-@@ -458,16 +458,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -274,8 +432,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
- if (ksig->ka.sa.sa_flags & SA_RESTORER)
- restorer = ksig->ka.sa.sa_restorer;
-+ else if (current->mm->context.vdso)
-+ /* Return stub is in 32bit vsyscall page */
-+ restorer = (void __force_user *)(current->mm->context.vdso +
-+ selected_vdso32->sym___kernel_rt_sigreturn);
- else
-- restorer = current->mm->context.vdso +
-- selected_vdso32->sym___kernel_rt_sigreturn;
-+ restorer = frame->retcode;
- put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -287,7 +445,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
- /*
- * Not actually used anymore, but left because some gdb
- * versions need it.
- */
-- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
-+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
- } put_user_catch(err);
+ /*
+@@ -312,32 +470,44 @@ sysenter_past_esp:
+ pushl $__USER_CS
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary: TI_sysenter_return
+- * is relative to thread_info, which is at the bottom of the
+- * kernel stack page. 4*4 means the 4 words pushed above;
+- * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
+- * and THREAD_SIZE takes us to the bottom.
+ */
+- pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
++ pushl $0
- err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
-diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
-index 72bf268..127572a 100644
---- a/arch/x86/ia32/ia32entry.S
-+++ b/arch/x86/ia32/ia32entry.S
-@@ -15,8 +15,10 @@
+ pushl %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp), %ebp
++ movl %ebp, PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp), %ds
++1: movl %ds:(%ebp), %ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3, %ebp
+ jae syscall_fault
+ ASM_STAC
+ 1: movl (%ebp), %ebp
+ ASM_CLAC
++#endif
++
+ movl %ebp, PT_EBP(%esp)
+ _ASM_EXTABLE(1b, syscall_fault)
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -353,12 +523,24 @@ sysenter_after_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jnz sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp, %ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -372,6 +554,9 @@ sysenter_audit:
+ pushl PT_ESI(%esp) /* a3: 5th arg */
+ pushl PT_EDX+4(%esp) /* a2: 4th arg */
+ call __audit_syscall_entry
++
++ pax_erase_kstack
++
+ popl %ecx /* get that remapped edx off the stack */
+ popl %ecx /* get that remapped esi off the stack */
+ movl PT_EAX(%esp), %eax /* reload syscall number */
+@@ -397,10 +582,16 @@ sysexit_audit:
+ #endif
+
+ .pushsection .fixup, "ax"
+-2: movl $0, PT_FS(%esp)
++4: movl $0, PT_FS(%esp)
++ jmp 1b
++5: movl $0, PT_DS(%esp)
++ jmp 1b
++6: movl $0, PT_ES(%esp)
+ jmp 1b
+ .popsection
+- _ASM_EXTABLE(1b, 2b)
++ _ASM_EXTABLE(1b, 4b)
++ _ASM_EXTABLE(2b, 5b)
++ _ASM_EXTABLE(3b, 6b)
+ PTGS_TO_GS_EX
+ ENDPROC(entry_SYSENTER_32)
+
+@@ -410,6 +601,11 @@ ENTRY(entry_INT80_32)
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -429,6 +625,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jnz syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -483,14 +688,34 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $X86_CR0_WP_BIT, %esi
++ mov %esi, %cr0
++#endif
++
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $X86_CR0_WP_BIT, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl $__ESPFIX_SS
+ pushl %eax /* new kernel esp */
+ /*
+@@ -519,20 +744,18 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+ jnz work_notifysig_v86 # returning to kernel-space or
+ # vm86-space
+ 1:
+-#else
+- movl %esp, %eax
+ #endif
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -553,7 +776,7 @@ work_notifysig_v86:
+ movl %eax, %esp
+ jmp 1b
+ #endif
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -561,11 +784,14 @@ syscall_trace_entry:
+ movl $-ENOSYS, PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(NR_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -578,24 +804,28 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ ASM_CLAC
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT, PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS, %eax
+ jmp syscall_after_call
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+
+ sysenter_badsys:
+ movl $-ENOSYS, %eax
+ jmp sysenter_after_call
+-END(sysenter_badsys)
++ENDPROC(sysenter_badsys)
+
+ .macro FIXUP_ESPFIX_STACK
+ /*
+@@ -607,8 +837,15 @@ END(sysenter_badsys)
+ */
+ #ifdef CONFIG_X86_ESPFIX32
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl $__KERNEL_DS
+@@ -644,7 +881,7 @@ ENTRY(irq_entries_start)
+ jmp common_interrupt
+ .align 8
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ /*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+@@ -691,7 +928,7 @@ ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp error_code
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ ASM_CLAC
+@@ -705,25 +942,25 @@ ENTRY(simd_coprocessor_error)
+ pushl $do_simd_coprocessor_error
+ #endif
+ jmp error_code
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ ASM_CLAC
+ pushl $-1 # mark this as an int
+ pushl $do_device_not_available
+ jmp error_code
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+ iret
+ _ASM_EXTABLE(native_iret, iret_exc)
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -731,59 +968,59 @@ ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp error_code
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ ASM_CLAC
+ pushl $0
+ pushl $do_bounds
+ jmp error_code
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ ASM_CLAC
+ pushl $0
+ pushl $do_invalid_op
+ jmp error_code
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ ASM_CLAC
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp error_code
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ ASM_CLAC
+ pushl $do_invalid_TSS
+ jmp error_code
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ ASM_CLAC
+ pushl $do_segment_not_present
+ jmp error_code
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ ASM_CLAC
+ pushl $do_stack_segment
+ jmp error_code
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ ASM_CLAC
+ pushl $do_alignment_check
+ jmp error_code
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ ASM_CLAC
+ pushl $0 # no error code
+ pushl $do_divide_error
+ jmp error_code
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -791,7 +1028,7 @@ ENTRY(machine_check)
+ pushl $0
+ pushl machine_check_vector
+ jmp error_code
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -799,7 +1036,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl $0
+ pushl $do_spurious_interrupt_bug
+ jmp error_code
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+
+ #ifdef CONFIG_XEN
+ /*
+@@ -906,7 +1143,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ pushl %eax
+@@ -936,7 +1173,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ ENTRY(ftrace_regs_caller)
+ pushf /* push flags before compare (in cs location) */
+@@ -1034,7 +1271,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1052,7 +1289,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1100,14 +1337,17 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp, %eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1145,7 +1385,7 @@ debug_stack_correct:
+ movl %esp, %eax # pt_regs pointer
+ call do_debug
+ jmp ret_from_exception
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1184,6 +1424,9 @@ nmi_stack_correct:
+ xorl %edx, %edx # zero error code
+ movl %esp, %eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+
+ nmi_stack_fixup:
+@@ -1217,11 +1460,14 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx, %edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ jmp irq_return
+ #endif
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ ASM_CLAC
+@@ -1232,17 +1478,17 @@ ENTRY(int3)
+ movl %esp, %eax # pt_regs pointer
+ call do_int3
+ jmp ret_from_exception
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp error_code
+-END(general_protection)
++ENDPROC(general_protection)
+
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+ ASM_CLAC
+ pushl $do_async_page_fault
+ jmp error_code
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index 8cb3e43..a497278 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -37,6 +37,8 @@
+ #include <asm/smap.h>
+ #include <asm/pgtable_types.h>
+ #include <linux/err.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -54,6 +56,402 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ call pax_exit_kernel
++#endif
++ .endm
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jnz 112f
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++ jmp 111f
++112: cmp $1,%dil
++ jz 113f
++ ud2
++113: sub $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__UDEREF_KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2: ljmpq __KERNEL_CS,1b
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++#endif
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jnc 4f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ mov %ss,%edi
++ cmp $__UDEREF_KERNEL_DS,%edi
++ jnz 111f
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jz 112f
++ ud2
++112: add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2: GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ jnc 4f
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++4: ud2
++ jmp 4b
++#endif
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $1,%dil
++ jnz 4f
++ sub $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ jmp 3f
++111:
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ pushq %rdi
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ popq %rdi
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++2: SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++3:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++4: ud2
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID
++ cmp $0,%dil
++ jnz 3f
++ add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ jmp 2f
++1:
++
++ mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ jnc 3f
++ SET_RDI_INTO_CR0
++#endif
++
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rbx)
++ i = i + 1
++ .endr
++2:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++3: ud2
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++ .macro pax_enter_kernel_nmi
++ pax_set_fptr_mask
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jc 110f
++ SET_RDI_INTO_CR0
++ or $2,%ebx
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jz 111f
++ sub $4097,%rdi
++ or $4,%ebx
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__UDEREF_KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++ .endm
++
++ .macro pax_exit_kernel_nmi
++#ifdef CONFIG_PAX_KERNEXEC
++ btr $1,%ebx
++ jnc 110f
++ GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ SET_RDI_INTO_CR0
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ btr $2,%ebx
++ jnc 111f
++ GET_CR3_INTO_RDI
++ add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++ .endm
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rcx
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ or $2*8, %rdi
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rcx
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
+ .macro TRACE_IRQS_IRETQ
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bt $9, EFLAGS(%rsp) /* interrupts off? */
+@@ -89,7 +487,7 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro TRACE_IRQS_IRETQ_DEBUG
+- bt $9, EFLAGS(%rsp) /* interrupts off? */
++ bt $X86_EFLAGS_IF_BIT, EFLAGS(%rsp) /* interrupts off? */
+ jnc 1f
+ TRACE_IRQS_ON_DEBUG
+ 1:
+@@ -149,14 +547,6 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+ /* Construct struct pt_regs on stack */
+ pushq $__USER_DS /* pt_regs->ss */
+ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
+- /*
+- * Re-enable interrupts.
+- * We use 'rsp_scratch' as a scratch space, hence irq-off block above
+- * must execute atomically in the face of possible interrupt-driven
+- * task preemption. We must enable interrupts only after we're done
+- * with using rsp_scratch:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq %r11 /* pt_regs->flags */
+ pushq $__USER_CS /* pt_regs->cs */
+ pushq %rcx /* pt_regs->ip */
+@@ -172,7 +562,27 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+ pushq %r11 /* pt_regs->r11 */
+ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12(%rsp)
++#endif
++
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * Re-enable interrupts.
++ * We use 'rsp_scratch' as a scratch space, hence irq-off block above
++ * must execute atomically in the face of possible interrupt-driven
++ * task preemption. We must enable interrupts only after we're done
++ * with using rsp_scratch:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
++ GET_THREAD_INFO(%rcx)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%rcx)
+ jnz tracesys
+ entry_SYSCALL_64_fastpath:
+ #if __SYSCALL_MASK == ~0
+@@ -205,9 +615,13 @@ entry_SYSCALL_64_fastpath:
+ * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+ * very bad.
+ */
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ GET_THREAD_INFO(%rcx)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%rcx)
+ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
+
++ pax_exit_kernel_user
++ pax_erase_kstack
++
+ RESTORE_C_REGS_EXCEPT_RCX_R11
+ movq RIP(%rsp), %rcx
+ movq EFLAGS(%rsp), %r11
+@@ -236,6 +650,9 @@ tracesys:
+ call syscall_trace_enter_phase1
+ test %rax, %rax
+ jnz tracesys_phase2 /* if needed, run the slow path */
++
++ pax_erase_kstack
++
+ RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
+ movq ORIG_RAX(%rsp), %rax
+ jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
+@@ -247,6 +664,8 @@ tracesys_phase2:
+ movq %rax, %rdx
+ call syscall_trace_enter_phase2
+
++ pax_erase_kstack
++
+ /*
+ * Reload registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_entry_phase2() returned
+@@ -284,6 +703,8 @@ GLOBAL(int_with_check)
+ andl %edi, %edx
+ jnz int_careful
+ andl $~TS_COMPAT, TI_status(%rcx)
++ pax_exit_kernel_user
++ pax_erase_kstack
+ jmp syscall_return
+
+ /*
+@@ -407,14 +828,14 @@ syscall_return_via_sysret:
+ opportunistic_sysret_failed:
+ SWAPGS
+ jmp restore_c_regs_and_iret
+-END(entry_SYSCALL_64)
++ENDPROC(entry_SYSCALL_64)
+
+
+ .macro FORK_LIKE func
+ ENTRY(stub_\func)
+ SAVE_EXTRA_REGS 8
+ jmp sys_\func
+-END(stub_\func)
++ENDPROC(stub_\func)
+ .endm
+
+ FORK_LIKE clone
+@@ -434,7 +855,7 @@ return_from_execve:
+ ZERO_EXTRA_REGS
+ movq %rax, RAX(%rsp)
+ jmp int_ret_from_sys_call
+-END(stub_execve)
++ENDPROC(stub_execve)
+ /*
+ * Remaining execve stubs are only 7 bytes long.
+ * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
+@@ -443,7 +864,7 @@ END(stub_execve)
+ GLOBAL(stub_execveat)
+ call sys_execveat
+ jmp return_from_execve
+-END(stub_execveat)
++ENDPROC(stub_execveat)
+
+ #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+ .align 8
+@@ -451,15 +872,15 @@ GLOBAL(stub_x32_execve)
+ GLOBAL(stub32_execve)
+ call compat_sys_execve
+ jmp return_from_execve
+-END(stub32_execve)
+-END(stub_x32_execve)
++ENDPROC(stub32_execve)
++ENDPROC(stub_x32_execve)
+ .align 8
+ GLOBAL(stub_x32_execveat)
+ GLOBAL(stub32_execveat)
+ call compat_sys_execveat
+ jmp return_from_execve
+-END(stub32_execveat)
+-END(stub_x32_execveat)
++ENDPROC(stub32_execveat)
++ENDPROC(stub_x32_execveat)
+ #endif
+
+ /*
+@@ -488,7 +909,7 @@ ENTRY(stub_x32_rt_sigreturn)
+ SAVE_EXTRA_REGS 8
+ call sys32_x32_rt_sigreturn
+ jmp return_from_stub
+-END(stub_x32_rt_sigreturn)
++ENDPROC(stub_x32_rt_sigreturn)
+ #endif
+
+ /*
+@@ -527,7 +948,7 @@ ENTRY(ret_from_fork)
+ movl $0, RAX(%rsp)
+ RESTORE_EXTRA_REGS
+ jmp int_ret_from_sys_call
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Build the entry stubs with some assembler magic.
+@@ -542,7 +963,7 @@ ENTRY(irq_entries_start)
+ jmp common_interrupt
+ .align 8
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ /*
+ * Interrupt entry/exit.
+@@ -555,21 +976,13 @@ END(irq_entries_start)
+ /* 0(%rsp): ~(interrupt number) */
+ .macro interrupt func
+ cld
+- /*
+- * Since nothing in interrupt handling code touches r12...r15 members
+- * of "struct pt_regs", and since interrupts can nest, we can save
+- * four stack slots and simultaneously provide
+- * an unwind-friendly stack layout by saving "truncated" pt_regs
+- * exactly up to rbp slot, without these members.
+- */
+- ALLOC_PT_GPREGS_ON_STACK -RBP
+- SAVE_C_REGS -RBP
+- /* this goes to 0(%rsp) for unwinder, not for saving the value: */
+- SAVE_EXTRA_REGS_RBP -RBP
++ ALLOC_PT_GPREGS_ON_STACK
++ SAVE_C_REGS
++ SAVE_EXTRA_REGS
+
+- leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */
++ movq %rsp, %rdi /* arg1 for \func (pointer to pt_regs) */
+
+- testb $3, CS-RBP(%rsp)
++ testb $3, CS(%rsp)
+ jz 1f
+ SWAPGS
+ 1:
+@@ -584,6 +997,18 @@ END(irq_entries_start)
+ incl PER_CPU_VAR(irq_count)
+ cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
+ pushq %rsi
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
++
+ /* We entered an interrupt context - irqs are off: */
+ TRACE_IRQS_OFF
+
+@@ -608,7 +1033,7 @@ ret_from_intr:
+ /* Restore saved previous stack */
+ popq %rsi
+ /* return code expects complete pt_regs - adjust rsp accordingly: */
+- leaq -RBP(%rsi), %rsp
++ movq %rsi, %rsp
+
+ testb $3, CS(%rsp)
+ jz retint_kernel
+@@ -630,6 +1055,8 @@ retint_swapgs: /* return to user-space */
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
++# pax_erase_kstack
+ TRACE_IRQS_IRETQ
+
+ SWAPGS
+@@ -648,6 +1075,21 @@ retint_kernel:
+ jmp 0b
+ 1:
+ #endif
++
++ pax_exit_kernel
++
++#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
++ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
++ * namely calling EFI runtime services with a phys mapping. We're
++ * starting off with NOPs and patch in the real instrumentation
++ * (BTS/OR) before starting any userland process; even before starting
++ * up the APs.
++ */
++ ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS
++#else
++ pax_force_retaddr RIP
++#endif
++
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -689,15 +1131,15 @@ native_irq_return_ldt:
+ SWAPGS
+ movq PER_CPU_VAR(espfix_waddr), %rdi
+ movq %rax, (0*8)(%rdi) /* RAX */
+- movq (2*8)(%rsp), %rax /* RIP */
++ movq (2*8 + RIP-RIP)(%rsp), %rax /* RIP */
+ movq %rax, (1*8)(%rdi)
+- movq (3*8)(%rsp), %rax /* CS */
++ movq (2*8 + CS-RIP)(%rsp), %rax /* CS */
+ movq %rax, (2*8)(%rdi)
+- movq (4*8)(%rsp), %rax /* RFLAGS */
++ movq (2*8 + EFLAGS-RIP)(%rsp), %rax /* RFLAGS */
+ movq %rax, (3*8)(%rdi)
+- movq (6*8)(%rsp), %rax /* SS */
++ movq (2*8 + SS-RIP)(%rsp), %rax /* SS */
+ movq %rax, (5*8)(%rdi)
+- movq (5*8)(%rsp), %rax /* RSP */
++ movq (2*8 + RSP-RIP)(%rsp), %rax /* RSP */
+ movq %rax, (4*8)(%rdi)
+ andl $0xffff0000, %eax
+ popq %rdi
+@@ -738,7 +1180,7 @@ retint_signal:
+ GET_THREAD_INFO(%rcx)
+ jmp retint_with_reschedule
+
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+
+ /*
+ * APIC interrupts.
+@@ -750,7 +1192,7 @@ ENTRY(\sym)
+ .Lcommon_\sym:
+ interrupt \do_sym
+ jmp ret_from_intr
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_TRACING
+@@ -815,7 +1257,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+ /*
+ * Exception entry points.
+ */
+-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
++#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
+
+ .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+ ENTRY(\sym)
+@@ -862,6 +1304,12 @@ ENTRY(\sym)
+ .endif
+
+ .if \shift_ist != -1
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++ lea cpu_tss(%r13), %r13
++#else
++ lea cpu_tss(%rip), %r13
++#endif
+ subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ .endif
+
+@@ -905,7 +1353,7 @@ ENTRY(\sym)
+
+ jmp error_exit /* %ebx: no swapgs flag */
+ .endif
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_TRACING
+@@ -947,8 +1395,9 @@ gs_change:
+ 2: mfence /* workaround */
+ SWAPGS
+ popfq
++ pax_force_retaddr
+ ret
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ _ASM_EXTABLE(gs_change, bad_gs)
+ .section .fixup, "ax"
+@@ -970,8 +1419,9 @@ ENTRY(do_softirq_own_stack)
+ call __do_softirq
+ leaveq
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+-END(do_softirq_own_stack)
++ENDPROC(do_softirq_own_stack)
+
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+@@ -1007,7 +1457,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+ call xen_maybe_preempt_hcall
+ #endif
+ jmp error_exit
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1052,7 +1502,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_C_REGS
+ SAVE_EXTRA_REGS
+ jmp error_exit
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+ xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1101,8 +1551,36 @@ ENTRY(paranoid_entry)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx, %ebx
+-1: ret
+-END(paranoid_entry)
++1:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS+8(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
++ pax_force_retaddr
++ ret
++ENDPROC(paranoid_entry)
++
++ENTRY(paranoid_entry_nmi)
++ cld
++ SAVE_C_REGS 8
++ SAVE_EXTRA_REGS 8
++ movl $1, %ebx
++ movl $MSR_GS_BASE, %ecx
++ rdmsr
++ testl %edx, %edx
++ js 1f /* negative -> in kernel */
++ SWAPGS
++ xorl %ebx, %ebx
++1: pax_enter_kernel_nmi
++ pax_force_retaddr
++ ret
++ENDPROC(paranoid_entry_nmi)
+
+ /*
+ * "Paranoid" exit path from exception stack. This is invoked
+@@ -1119,19 +1597,26 @@ END(paranoid_entry)
+ ENTRY(paranoid_exit)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF_DEBUG
+- testl %ebx, %ebx /* swapgs needed? */
++ testl $1, %ebx /* swapgs needed? */
+ jnz paranoid_exit_no_swapgs
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ
+ SWAPGS_UNSAFE_STACK
+ jmp paranoid_exit_restore
+ paranoid_exit_no_swapgs:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ_DEBUG
+ paranoid_exit_restore:
+ RESTORE_EXTRA_REGS
+ RESTORE_C_REGS
+ REMOVE_PT_GPREGS_FROM_STACK 8
++ pax_force_retaddr_bts
+ INTERRUPT_RETURN
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+
+ /*
+ * Save all registers in pt_regs, and switch gs if needed.
+@@ -1149,7 +1634,18 @@ ENTRY(error_entry)
+ SWAPGS
+
+ error_entry_done:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS+8(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ TRACE_IRQS_OFF
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -1199,7 +1695,7 @@ error_bad_iret:
+ mov %rax, %rsp
+ decl %ebx
+ jmp error_entry_done
+-END(error_entry)
++ENDPROC(error_entry)
+
+
+ /*
+@@ -1212,10 +1708,10 @@ ENTRY(error_exit)
+ RESTORE_EXTRA_REGS
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl %eax, %eax
++ testl $1, %eax
+ jnz retint_kernel
+ jmp retint_user
+-END(error_exit)
++ENDPROC(error_exit)
+
+ /* Runs on exception stack */
+ ENTRY(nmi)
+@@ -1258,6 +1754,8 @@ ENTRY(nmi)
+ * other IST entries.
+ */
+
++ ASM_CLAC
++
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+
+@@ -1298,6 +1796,12 @@ ENTRY(nmi)
+ pushq %r14 /* pt_regs->r14 */
+ pushq %r15 /* pt_regs->r15 */
+
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ xorl %ebx, %ebx
++#endif
++
++ pax_enter_kernel_nmi
++
+ /*
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+@@ -1308,12 +1812,19 @@ ENTRY(nmi)
+ movq $-1, %rsi
+ call do_nmi
+
++ pax_exit_kernel_nmi
++
+ /*
+ * Return back to user mode. We must *not* do the normal exit
+ * work, because we don't want to enable interrupts. Fortunately,
+ * do_nmi doesn't modify pt_regs.
+ */
+ SWAPGS
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ movq RBX(%rsp), %rbx
++#endif
++
+ jmp restore_c_regs_and_iret
+
+ .Lnmi_from_kernel:
+@@ -1435,6 +1946,7 @@ nested_nmi_out:
+ popq %rdx
+
+ /* We are returning to kernel mode, so this cannot result in a fault. */
++# pax_force_retaddr_bts
+ INTERRUPT_RETURN
+
+ first_nmi:
+@@ -1508,20 +2020,22 @@ end_repeat_nmi:
+ ALLOC_PT_GPREGS_ON_STACK
+
+ /*
+- * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
++ * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit
+ * as we should not be calling schedule in NMI context.
+ * Even with normal interrupts enabled. An NMI should not be
+ * setting NEED_RESCHED or anything that normal interrupts and
+ * exceptions might do.
+ */
+- call paranoid_entry
++ call paranoid_entry_nmi
+
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp, %rdi
+ movq $-1, %rsi
+ call do_nmi
+
+- testl %ebx, %ebx /* swapgs needed? */
++ pax_exit_kernel_nmi
++
++ testl $1, %ebx /* swapgs needed? */
+ jnz nmi_restore
+ nmi_swapgs:
+ SWAPGS_UNSAFE_STACK
+@@ -1532,6 +2046,8 @@ nmi_restore:
+ /* Point RSP at the "iret" frame. */
+ REMOVE_PT_GPREGS_FROM_STACK 6*8
+
++ pax_force_retaddr_bts
++
+ /*
+ * Clear "NMI executing". Set DF first so that we can easily
+ * distinguish the remaining code between here and IRET from
+@@ -1549,9 +2065,9 @@ nmi_restore:
+ * mode, so this cannot result in a fault.
+ */
+ INTERRUPT_RETURN
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(ignore_sysret)
+ mov $-ENOSYS, %eax
+ sysret
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index a7e257d..3a6ad23 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -13,8 +13,10 @@
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
@@ -14704,75 +16634,70 @@ index 72bf268..127572a 100644
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
-@@ -85,6 +87,32 @@ ENTRY(native_irq_enable_sysexit)
- ENDPROC(native_irq_enable_sysexit)
+@@ -35,6 +37,32 @@ ENTRY(native_usergs_sysret32)
+ ENDPROC(native_usergs_sysret32)
#endif
+ .macro pax_enter_kernel_user
+ pax_set_fptr_mask
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
++ call pax_enter_kernel_user
+#endif
+ .endm
+
+ .macro pax_exit_kernel_user
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
++ call pax_exit_kernel_user
+#endif
+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushq %rax
-+ pushq %r11
-+ call pax_randomize_kstack
-+ popq %r11
-+ popq %rax
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
+#endif
+ .endm
+
+ .macro pax_erase_kstack
+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
++ call pax_erase_kstack
+#endif
+ .endm
+
/*
- * 32bit SYSENTER instruction entry.
+ * 32-bit SYSENTER instruction entry.
*
-@@ -119,23 +147,24 @@ ENTRY(ia32_sysenter_target)
- * it is too small to ever cause noticeable irq latency.
+@@ -65,20 +93,21 @@ ENTRY(entry_SYSENTER_compat)
*/
SWAPGS_UNSAFE_STACK
-- movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- ENABLE_INTERRUPTS(CLBR_NONE)
-+ movq PER_CPU_VAR(kernel_stack), %rsp
/* Zero-extending 32-bit regs, do not remove */
movl %ebp, %ebp
movl %eax, %eax
- movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-- CFI_REGISTER rip,r10
+ GET_THREAD_INFO(%r11)
+ movl TI_sysenter_return(%r11), %r11d
-+ CFI_REGISTER rip,r11
/* Construct struct pt_regs on stack */
- pushq_cfi $__USER32_DS /* pt_regs->ss */
- pushq_cfi %rbp /* pt_regs->sp */
- CFI_REL_OFFSET rsp,0
- pushfq_cfi /* pt_regs->flags */
+ pushq $__USER32_DS /* pt_regs->ss */
+ pushq %rbp /* pt_regs->sp */
+ pushfq /* pt_regs->flags */
+ orl $X86_EFLAGS_IF,(%rsp)
- pushq_cfi $__USER32_CS /* pt_regs->cs */
-- pushq_cfi %r10 /* pt_regs->ip = thread_info->sysenter_return */
-+ pushq_cfi %r11 /* pt_regs->ip = thread_info->sysenter_return */
- CFI_REL_OFFSET rip,0
- pushq_cfi_reg rax /* pt_regs->orig_ax */
- pushq_cfi_reg rdi /* pt_regs->di */
-@@ -147,15 +176,37 @@ ENTRY(ia32_sysenter_target)
- sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
- CFI_ADJUST_CFA_OFFSET 10*8
+ pushq $__USER32_CS /* pt_regs->cs */
+- pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
++ pushq %r11 /* pt_regs->ip = thread_info->sysenter_return */
+ pushq %rax /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+@@ -88,15 +117,37 @@ ENTRY(entry_SYSENTER_compat)
+ cld
+ sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
++ movq %r12, R12(%rsp)
+#endif
+
+ pax_enter_kernel_user
@@ -14785,17 +16710,17 @@ index 72bf268..127572a 100644
+
/*
* no need to do an access_ok check here because rbp has been
- * 32bit zero extended
+ * 32-bit zero extended
*/
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ addq pax_user_shadow_base,%rbp
++ addq pax_user_shadow_base, %rbp
+ ASM_PAX_OPEN_USERLAND
+#endif
+
ASM_STAC
- 1: movl (%rbp),%ebp
- _ASM_EXTABLE(1b,ia32_badarg)
+ 1: movl (%rbp), %ebp
+ _ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -14805,107 +16730,105 @@ index 72bf268..127572a 100644
/*
* Sysenter doesn't filter flags, so we need to clear NT
* ourselves. To save a few cycles, we can check whether
-@@ -165,8 +216,9 @@ ENTRY(ia32_sysenter_target)
- jnz sysenter_fix_flags
+@@ -106,8 +157,9 @@ ENTRY(entry_SYSENTER_compat)
+ jnz sysenter_fix_flags
sysenter_flags_fixed:
-- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz sysenter_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
-@@ -181,9 +233,10 @@ sysenter_do_call:
- sysenter_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX(%rsp)
++ orl $TS_COMPAT, TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%r11)
+ jnz sysenter_tracesys
+
+ sysenter_do_call:
+@@ -123,9 +175,10 @@ sysenter_dispatch:
+ call *ia32_sys_call_table(, %rax, 8)
+ movq %rax, RAX(%rsp)
+ 1:
+ GET_THREAD_INFO(%r11)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%r11)
jnz sysexit_audit
sysexit_from_sys_call:
/*
-@@ -196,7 +249,9 @@ sysexit_from_sys_call:
+@@ -138,7 +191,9 @@ sysexit_from_sys_call:
* This code path is still called 'sysexit' because it pairs
* with 'sysenter' and it uses the SYSENTER calling convention.
*/
-- andl $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ pax_exit_kernel_user
+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
- movl RIP(%rsp),%ecx /* User %eip */
- CFI_REGISTER rip,rcx
++ andl $~TS_COMPAT, TI_status(%r11)
+ movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
RESTORE_RSI_RDI
-@@ -247,6 +302,9 @@ sysexit_from_sys_call:
- movl %ebx,%esi /* 2nd arg: 1st syscall arg */
- movl %eax,%edi /* 1st arg: syscall number */
- call __audit_syscall_entry
-+
+@@ -194,6 +249,8 @@ sysexit_from_sys_call:
+ movl %eax, %edi /* arg1 (RDI) <= syscall number (EAX) */
+ call __audit_syscall_entry
+
+ pax_erase_kstack
+
- movl RAX(%rsp),%eax /* reload syscall number */
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -258,7 +316,7 @@ sysexit_from_sys_call:
+ /*
+ * We are going to jump back to the syscall dispatch code.
+ * Prepare syscall args as required by the 64-bit C ABI.
+@@ -209,7 +266,7 @@ sysexit_from_sys_call:
.endm
.macro auditsys_exit exit
-- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jnz ia32_ret_from_sys_call
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -269,11 +327,12 @@ sysexit_from_sys_call:
- 1: setbe %al /* 1 if error, 0 if not */
- movzbl %al,%edi /* zero-extend that into %edi */
- call __audit_syscall_exit
+@@ -220,10 +277,11 @@ sysexit_from_sys_call:
+ 1: setbe %al /* 1 if error, 0 if not */
+ movzbl %al, %edi /* zero-extend that into %edi */
+ call __audit_syscall_exit
+ GET_THREAD_INFO(%r11)
- movq RAX(%rsp),%rax /* reload syscall return value */
- movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
-- testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl %edi,TI_flags(%r11)
- jz \exit
- CLEAR_RREGS
- jmp int_with_check
-@@ -295,7 +354,7 @@ sysenter_fix_flags:
+- testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl %edi, TI_flags(%r11)
+ jz \exit
+ xorl %eax, %eax /* Do not leak kernel information */
+ movq %rax, R11(%rsp)
+@@ -249,7 +307,7 @@ sysenter_fix_flags:
sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
jz sysenter_auditsys
#endif
SAVE_EXTRA_REGS
-@@ -307,6 +366,9 @@ sysenter_tracesys:
+@@ -269,6 +327,9 @@ sysenter_tracesys:
+ movl %eax, %eax /* zero extension */
+
RESTORE_EXTRA_REGS
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
jmp sysenter_do_call
- CFI_ENDPROC
- ENDPROC(ia32_sysenter_target)
-@@ -357,7 +419,6 @@ ENTRY(ia32_cstar_target)
- movl %esp,%r8d
- CFI_REGISTER rsp,r8
- movq PER_CPU_VAR(kernel_stack),%rsp
+ ENDPROC(entry_SYSENTER_compat)
+
+@@ -311,7 +372,6 @@ ENTRY(entry_SYSCALL_compat)
+ SWAPGS_UNSAFE_STACK
+ movl %esp, %r8d
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- ENABLE_INTERRUPTS(CLBR_NONE)
/* Zero-extending 32-bit regs, do not remove */
- movl %eax,%eax
-@@ -380,16 +441,41 @@ ENTRY(ia32_cstar_target)
- sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
- CFI_ADJUST_CFA_OFFSET 10*8
+ movl %eax, %eax
+@@ -331,16 +391,41 @@ ENTRY(entry_SYSCALL_compat)
+ pushq $-ENOSYS /* pt_regs->ax */
+ sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
++ movq %r12, R12(%rsp)
+#endif
+
+ pax_enter_kernel_user
@@ -14917,22 +16840,22 @@ index 72bf268..127572a 100644
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
/*
- * no need to do an access_ok check here because r8 has been
- * 32bit zero extended
+ * No need to do an access_ok check here because r8 has been
+ * 32-bit zero extended:
*/
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_OPEN_USERLAND
-+ movq pax_user_shadow_base,%r8
-+ addq RSP(%rsp),%r8
++ movq pax_user_shadow_base, %r8
++ addq RSP(%rsp), %r8
+#endif
+
ASM_STAC
- 1: movl (%r8),%r9d
- _ASM_EXTABLE(1b,ia32_badarg)
+ 1: movl (%r8), %r9d
+ _ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC
-- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ ASM_PAX_CLOSE_USERLAND
@@ -14941,46 +16864,47 @@ index 72bf268..127572a 100644
+ GET_THREAD_INFO(%r11)
+ orl $TS_COMPAT,TI_status(%r11)
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- CFI_REMEMBER_STATE
- jnz cstar_tracesys
- cmpq $IA32_NR_syscalls-1,%rax
-@@ -404,12 +490,15 @@ cstar_do_call:
- cstar_dispatch:
- call *ia32_sys_call_table(,%rax,8)
- movq %rax,RAX(%rsp)
+ jnz cstar_tracesys
+
+ cstar_do_call:
+@@ -358,13 +443,16 @@ cstar_dispatch:
+ call *ia32_sys_call_table(, %rax, 8)
+ movq %rax, RAX(%rsp)
+ 1:
+ GET_THREAD_INFO(%r11)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
-- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
- jnz sysretl_audit
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%r11)
+ jnz sysretl_audit
+
sysretl_from_sys_call:
-- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ pax_exit_kernel_user
+ pax_erase_kstack
-+ andl $~TS_COMPAT,TI_status(%r11)
++ andl $~TS_COMPAT, TI_status(%r11)
RESTORE_RSI_RDI_RDX
- movl RIP(%rsp),%ecx
- CFI_REGISTER rip,rcx
-@@ -451,7 +540,7 @@ sysretl_audit:
+ movl RIP(%rsp), %ecx
+ movl EFLAGS(%rsp), %r11d
+@@ -403,7 +491,7 @@ sysretl_audit:
cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
-- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
- jz cstar_auditsys
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
+ jz cstar_auditsys
#endif
- xchgl %r9d,%ebp
-@@ -465,11 +554,19 @@ cstar_tracesys:
- xchgl %ebp,%r9d
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
+ xchgl %r9d, %ebp
+@@ -426,11 +514,19 @@ cstar_tracesys:
+
+ RESTORE_EXTRA_REGS
+ xchgl %ebp, %r9d
+
+ pax_erase_kstack
+
- jmp cstar_do_call
- END(ia32_cstar_target)
-
+ jmp cstar_do_call
+ END(entry_SYSCALL_compat)
+
ia32_badarg:
ASM_CLAC
+
@@ -14988,13 +16912,13 @@ index 72bf268..127572a 100644
+ ASM_PAX_CLOSE_USERLAND
+#endif
+
- movq $-EFAULT,%rax
- jmp ia32_sysret
- CFI_ENDPROC
-@@ -505,14 +602,8 @@ ENTRY(ia32_syscall)
- /*CFI_REL_OFFSET cs,1*8 */
- CFI_REL_OFFSET rip,0*8
+ movq $-EFAULT, RAX(%rsp)
+ ia32_ret_from_sys_call:
+ xorl %eax, %eax /* Do not leak kernel information */
+@@ -462,14 +558,8 @@ ia32_ret_from_sys_call:
+ */
+ ENTRY(entry_INT80_compat)
- /*
- * Interrupts are off on entry.
- * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -15005,15 +16929,15 @@ index 72bf268..127572a 100644
- ENABLE_INTERRUPTS(CLBR_NONE)
/* Zero-extending 32-bit regs, do not remove */
- movl %eax,%eax
-@@ -528,8 +619,26 @@ ENTRY(ia32_syscall)
- sub $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
- CFI_ADJUST_CFA_OFFSET 10*8
+ movl %eax, %eax
+@@ -488,8 +578,26 @@ ENTRY(entry_INT80_compat)
+ cld
+ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
-- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
++ movq %r12, R12(%rsp)
+#endif
+
+ pax_enter_kernel_user
@@ -15030,26 +16954,371 @@ index 72bf268..127572a 100644
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ GET_THREAD_INFO(%r11)
-+ orl $TS_COMPAT,TI_status(%r11)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
- jnz ia32_tracesys
- cmpq $(IA32_NR_syscalls-1),%rax
- ja ia32_badsys
-@@ -557,6 +666,9 @@ ia32_tracesys:
++ orl $TS_COMPAT, TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%r11)
+ jnz ia32_tracesys
+
+ ia32_do_call:
+@@ -524,6 +632,9 @@ ia32_tracesys:
+ movl RDI(%rsp), %edi
+ movl %eax, %eax /* zero extension */
RESTORE_EXTRA_REGS
- cmpq $(IA32_NR_syscalls-1),%rax
- ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
+
+ pax_erase_kstack
+
- jmp ia32_do_call
- END(ia32_syscall)
+ jmp ia32_do_call
+ END(entry_INT80_compat)
+
+diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
+index efb2b93..8a9cb8e 100644
+--- a/arch/x86/entry/thunk_64.S
++++ b/arch/x86/entry/thunk_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include "calling.h"
+ #include <asm/asm.h>
++#include <asm/alternative-asm.h>
+
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -62,6 +63,7 @@ restore:
+ popq %rdx
+ popq %rsi
+ popq %rdi
++ pax_force_retaddr
+ ret
+ _ASM_NOKPROBE(restore)
+ #endif
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index e970320..c006fea 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
+index 0224987..8deb742 100644
+--- a/arch/x86/entry/vdso/vdso2c.h
++++ b/arch/x86/entry/vdso/vdso2c.h
+@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ unsigned long load_size = -1; /* Work around bogus warning */
+ unsigned long mapping_size;
+ ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+- int i;
++ unsigned int i;
+ unsigned long j;
+ ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+ *alt_sec = NULL;
+@@ -83,7 +83,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ for (i = 0;
+ i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
+ i++) {
+- int k;
++ unsigned int k;
+ ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
+ GET_LE(&symtab_hdr->sh_entsize) * i;
+ const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 1c9f750..cfddb1a 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -19,10 +19,7 @@
+ #include <asm/page.h>
+ #include <asm/hpet.h>
+ #include <asm/desc.h>
+-
+-#if defined(CONFIG_X86_64)
+-unsigned int __read_mostly vdso64_enabled = 1;
+-#endif
++#include <asm/mman.h>
+
+ void __init init_vdso_image(const struct vdso_image *image)
+ {
+@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ .pages = no_pages,
+ };
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ calculate_addr = false;
++#endif
++
+ if (calculate_addr) {
+ addr = vdso_addr(current->mm->start_stack,
+ image->size - image->sym_vvar_start);
+@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ down_write(&mm->mmap_sem);
+
+ addr = get_unmapped_area(NULL, addr,
+- image->size - image->sym_vvar_start, 0, 0);
++ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ text_start = addr - image->sym_vvar_start;
+- current->mm->context.vdso = (void __user *)text_start;
++ mm->context.vdso = text_start;
+
+ /*
+ * MAYWRITE to allow gdb to COW and set breakpoints
+@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ hpet_address >> PAGE_SHIFT,
+ PAGE_SIZE,
+ pgprot_noncached(PAGE_READONLY));
+-
+- if (ret)
+- goto up_fail;
+ }
+ #endif
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+ return ret;
+@@ -191,8 +190,8 @@ static int load_vdso32(void)
+
+ if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+ current_thread_info()->sysenter_return =
+- current->mm->context.vdso +
+- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
++ (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
+
+ return 0;
+ }
+@@ -201,9 +200,6 @@ static int load_vdso32(void)
+ #ifdef CONFIG_X86_64
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+- if (!vdso64_enabled)
+- return 0;
+-
+ return map_vdso(&vdso_image_64, true);
+ }
+
+@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+ {
+ #ifdef CONFIG_X86_X32_ABI
+- if (test_thread_flag(TIF_X32)) {
+- if (!vdso64_enabled)
+- return 0;
+-
++ if (test_thread_flag(TIF_X32))
+ return map_vdso(&vdso_image_x32, true);
+- }
+ #endif
+
+ return load_vdso32();
+@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ #endif
+
+ #ifdef CONFIG_X86_64
+-static __init int vdso_setup(char *s)
+-{
+- vdso64_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+-#endif
+-
+-#ifdef CONFIG_X86_64
+ static void vgetcpu_cpu_init(void *arg)
+ {
+ int cpu = smp_processor_id();
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 2dcc6ff..082dc7a 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -38,15 +38,13 @@
+ #define CREATE_TRACE_POINTS
+ #include "vsyscall_trace.h"
+
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+
+ static int __init vsyscall_setup(char *str)
+ {
+ if (str) {
+ if (!strcmp("emulate", str))
+ vsyscall_mode = EMULATE;
+- else if (!strcmp("native", str))
+- vsyscall_mode = NATIVE;
+ else if (!strcmp("none", str))
+ vsyscall_mode = NONE;
+ else
+@@ -264,8 +262,7 @@ do_ret:
+ return true;
+
+ sigsegv:
+- force_sig(SIGSEGV, current);
+- return true;
++ do_group_exit(SIGKILL);
+ }
+
+ /*
+@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_ADDR,
+ .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC,
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ,
+ .vm_ops = &gate_vma_ops,
+ };
+
+@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
+ unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+
+ if (vsyscall_mode != NONE)
+- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+- vsyscall_mode == NATIVE
+- ? PAGE_KERNEL_VSYSCALL
+- : PAGE_KERNEL_VVAR);
++ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+ (unsigned long)VSYSCALL_ADDR);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index ae6aad1..719d6d9 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index ae3a29a..cea65e9 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_COMPAT_NSIG_WORDS > 1
+ && __copy_from_user((((char *) &set.sig) + 4),
+- &frame->extramask,
++ frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+@@ -336,7 +336,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -381,10 +381,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+ } else {
+ /* Return stub is in 32bit vsyscall page */
+ if (current->mm->context.vdso)
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_sigreturn;
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = frame->retcode;
+ }
+
+ put_user_try {
+@@ -394,7 +394,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -436,7 +436,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
+@@ -459,16 +459,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+
+ if (ksig->ka.sa.sa_flags & SA_RESTORER)
+ restorer = ksig->ka.sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_rt_sigreturn);
+ else
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_rt_sigreturn;
++ restorer = frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
-index 719cd70..69d576b 100644
+index 719cd70..72af944 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
-@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+@@ -49,18 +49,26 @@
+
+ #define AA(__x) ((unsigned long)(__x))
+
++static inline loff_t compose_loff(unsigned int high, unsigned int low)
++{
++ loff_t retval = low;
++
++ BUILD_BUG_ON(sizeof retval != sizeof low + sizeof high);
++ __builtin_memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high);
++ return retval;
++}
+
+ asmlinkage long sys32_truncate64(const char __user *filename,
+- unsigned long offset_low,
+- unsigned long offset_high)
++ unsigned int offset_low,
++ unsigned int offset_high)
+ {
+- return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
++ return sys_truncate(filename, compose_loff(offset_high, offset_low));
+ }
+
+-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+- unsigned long offset_high)
++asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned int offset_low,
++ unsigned int offset_high)
+ {
+- return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
++ return sys_ftruncate(fd, ((unsigned long) offset_high << 32) | offset_low);
+ }
+
+ /*
+@@ -69,8 +77,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
*/
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
{
@@ -15060,8 +17329,54 @@ index 719cd70..69d576b 100644
SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -196,29 +204,29 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+ __u32 len_low, __u32 len_high, int advice)
+ {
+ return sys_fadvise64_64(fd,
+- (((u64)offset_high)<<32) | offset_low,
+- (((u64)len_high)<<32) | len_low,
++ compose_loff(offset_high, offset_low),
++ compose_loff(len_high, len_low),
+ advice);
+ }
+
+ asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
+ size_t count)
+ {
+- return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
++ return sys_readahead(fd, compose_loff(off_hi, off_lo), count);
+ }
+
+ asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+ unsigned n_low, unsigned n_hi, int flags)
+ {
+ return sys_sync_file_range(fd,
+- ((u64)off_hi << 32) | off_low,
+- ((u64)n_hi << 32) | n_low, flags);
++ compose_loff(off_hi, off_low),
++ compose_loff(n_hi, n_low), flags);
+ }
+
+ asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
+- size_t len, int advice)
++ int len, int advice)
+ {
+- return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
++ return sys_fadvise64_64(fd, compose_loff(offset_hi, offset_lo),
+ len, advice);
+ }
+
+@@ -226,6 +234,6 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+ unsigned offset_hi, unsigned len_lo,
+ unsigned len_hi)
+ {
+- return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
+- ((u64)len_hi << 32) | len_lo);
++ return sys_fallocate(fd, mode, compose_loff(offset_hi, offset_lo),
++ compose_loff(len_hi, len_lo));
+ }
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
-index bdf02ee..51a4656 100644
+index e7636ba..e1fb78a 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -18,6 +18,45 @@
@@ -15107,10 +17422,10 @@ index bdf02ee..51a4656 100644
+ .endm
+#endif
+
- .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
- .long \orig - .
- .long \alt - .
-@@ -38,7 +77,7 @@
+ /*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+@@ -50,7 +89,7 @@
altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
.popsection
@@ -15119,7 +17434,7 @@ index bdf02ee..51a4656 100644
143:
\newinstr
144:
-@@ -68,7 +107,7 @@
+@@ -86,7 +125,7 @@
altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
.popsection
@@ -15129,10 +17444,10 @@ index bdf02ee..51a4656 100644
\newinstr1
144:
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
-index ba32af0..ff42fc0 100644
+index 7bfc85b..65d1ec4 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
-@@ -130,7 +130,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+@@ -136,7 +136,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
@@ -15141,7 +17456,7 @@ index ba32af0..ff42fc0 100644
ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
".popsection"
-@@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+@@ -146,7 +146,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_ENTRY(feature1, 1) \
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
@@ -15151,7 +17466,7 @@ index ba32af0..ff42fc0 100644
ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
".popsection"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
-index 976b86a..f3bc83a 100644
+index c839363..b9a8c43 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
@@ -15186,10 +17501,10 @@ index 20370c6..a2eb9b0 100644
"popl %%ebp\n\t"
"popl %%edi\n\t"
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
-index 5e5cd12..51cdc93 100644
+index e916895..42d729d 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
-@@ -28,6 +28,17 @@ static inline int atomic_read(const atomic_t *v)
+@@ -28,6 +28,17 @@ static __always_inline int atomic_read(const atomic_t *v)
}
/**
@@ -15198,7 +17513,7 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically reads the value of @v.
+ */
-+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return ACCESS_ONCE((v)->counter);
+}
@@ -15207,7 +17522,7 @@ index 5e5cd12..51cdc93 100644
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
-@@ -40,6 +51,18 @@ static inline void atomic_set(atomic_t *v, int i)
+@@ -40,6 +51,18 @@ static __always_inline void atomic_set(atomic_t *v, int i)
}
/**
@@ -15217,7 +17532,7 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically sets the value of @v to @i.
+ */
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++static __always_inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}
@@ -15226,9 +17541,9 @@ index 5e5cd12..51cdc93 100644
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
-@@ -48,7 +71,29 @@ static inline void atomic_set(atomic_t *v, int i)
+@@ -48,7 +71,29 @@ static __always_inline void atomic_set(atomic_t *v, int i)
*/
- static inline void atomic_add(int i, atomic_t *v)
+ static __always_inline void atomic_add(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "addl %1,%0"
+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
@@ -15251,15 +17566,15 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically adds @i to @v.
+ */
-+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++static __always_inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
: "+m" (v->counter)
: "ir" (i));
}
-@@ -62,7 +107,29 @@ static inline void atomic_add(int i, atomic_t *v)
+@@ -62,7 +107,29 @@ static __always_inline void atomic_add(int i, atomic_t *v)
*/
- static inline void atomic_sub(int i, atomic_t *v)
+ static __always_inline void atomic_sub(int i, atomic_t *v)
{
- asm volatile(LOCK_PREFIX "subl %1,%0"
+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
@@ -15282,24 +17597,24 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically subtracts @i from @v.
+ */
-+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++static __always_inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
: "+m" (v->counter)
: "ir" (i));
}
-@@ -78,7 +145,7 @@ static inline void atomic_sub(int i, atomic_t *v)
+@@ -78,7 +145,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
*/
- static inline int atomic_sub_and_test(int i, atomic_t *v)
+ static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
}
/**
-@@ -89,7 +156,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+@@ -89,7 +156,27 @@ static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
*/
- static inline void atomic_inc(atomic_t *v)
+ static __always_inline void atomic_inc(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "incl %0"
+ asm volatile(LOCK_PREFIX "incl %0\n"
@@ -15320,15 +17635,15 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically increments @v by 1.
+ */
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++static __always_inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ asm volatile(LOCK_PREFIX "incl %0\n"
: "+m" (v->counter));
}
-@@ -101,7 +188,27 @@ static inline void atomic_inc(atomic_t *v)
+@@ -101,7 +188,27 @@ static __always_inline void atomic_inc(atomic_t *v)
*/
- static inline void atomic_dec(atomic_t *v)
+ static __always_inline void atomic_dec(atomic_t *v)
{
- asm volatile(LOCK_PREFIX "decl %0"
+ asm volatile(LOCK_PREFIX "decl %0\n"
@@ -15349,24 +17664,24 @@ index 5e5cd12..51cdc93 100644
+ *
+ * Atomically decrements @v by 1.
+ */
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++static __always_inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ asm volatile(LOCK_PREFIX "decl %0\n"
: "+m" (v->counter));
}
-@@ -115,7 +222,7 @@ static inline void atomic_dec(atomic_t *v)
+@@ -115,7 +222,7 @@ static __always_inline void atomic_dec(atomic_t *v)
*/
- static inline int atomic_dec_and_test(atomic_t *v)
+ static __always_inline int atomic_dec_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
}
/**
-@@ -128,7 +235,20 @@ static inline int atomic_dec_and_test(atomic_t *v)
+@@ -128,7 +235,20 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
*/
- static inline int atomic_inc_and_test(atomic_t *v)
+ static __always_inline int atomic_inc_and_test(atomic_t *v)
{
- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
@@ -15380,27 +17695,27 @@ index 5e5cd12..51cdc93 100644
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++static __always_inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
/**
-@@ -142,7 +262,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
+@@ -142,7 +262,7 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
*/
- static inline int atomic_add_negative(int i, atomic_t *v)
+ static __always_inline int atomic_add_negative(int i, atomic_t *v)
{
- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
}
/**
-@@ -152,7 +272,19 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+@@ -152,7 +272,19 @@ static __always_inline int atomic_add_negative(int i, atomic_t *v)
*
* Atomically adds @i to @v and returns @i + @v
*/
--static inline int atomic_add_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
+-static __always_inline int atomic_add_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
+{
+ return i + xadd_check_overflow(&v->counter, i);
+}
@@ -15408,38 +17723,38 @@ index 5e5cd12..51cdc93 100644
+/**
+ * atomic_add_return_unchecked - add integer and return
+ * @i: integer value to add
-+ * @v: pointer of type atomic_unchecked_t
++ * @v: pointer of type atomic_unchecked_t
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++static __always_inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
return i + xadd(&v->counter, i);
}
-@@ -164,15 +296,24 @@ static inline int atomic_add_return(int i, atomic_t *v)
+@@ -164,15 +296,24 @@ static __always_inline int atomic_add_return(int i, atomic_t *v)
*
* Atomically subtracts @i from @v and returns @v - @i
*/
--static inline int atomic_sub_return(int i, atomic_t *v)
-+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
+-static __always_inline int atomic_sub_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++static __always_inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
#define atomic_dec_return(v) (atomic_sub_return(1, v))
--static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
+-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++static __always_inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+ return cmpxchg(&v->counter, old, new);
+}
+
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++static __always_inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
}
@@ -15457,7 +17772,7 @@ index 5e5cd12..51cdc93 100644
* @v: pointer of type atomic_t
@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
*/
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
- int c, old;
+ int c, old, new;
@@ -15484,7 +17799,7 @@ index 5e5cd12..51cdc93 100644
if (likely(old == c))
break;
c = old;
-@@ -207,6 +366,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+@@ -207,6 +366,49 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
}
/**
@@ -15534,7 +17849,7 @@ index 5e5cd12..51cdc93 100644
* atomic_inc_short - increment of a short integer
* @v: pointer to type int
*
-@@ -220,14 +422,37 @@ static inline short int atomic_inc_short(short int *v)
+@@ -220,14 +422,37 @@ static __always_inline short int atomic_inc_short(short int *v)
}
/* These are x86-specific, used by some header files */
@@ -15580,7 +17895,7 @@ index 5e5cd12..51cdc93 100644
#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
-index b154de7..bf18a5a 100644
+index b154de7..3dc335d 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -12,6 +12,14 @@ typedef struct {
@@ -15684,7 +17999,7 @@ index b154de7..bf18a5a 100644
+ *
+ * Atomically reads the value of @v and returns it.
+ */
-+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
++static inline long long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ long long r;
+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
@@ -15755,7 +18070,7 @@ index b154de7..bf18a5a 100644
* @i: integer value to subtract
* @v: pointer to type atomic64_t
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
-index f8d273e..02f39f3 100644
+index b965f9e..8e22dd3 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
@@ -15798,7 +18113,7 @@ index f8d273e..02f39f3 100644
* @v: pointer to type atomic64_t
@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
*/
- static inline void atomic64_add(long i, atomic64_t *v)
+ static __always_inline void atomic64_add(long i, atomic64_t *v)
{
+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
+
@@ -15820,12 +18135,12 @@ index f8d273e..02f39f3 100644
+ *
+ * Atomically adds @i to @v.
+ */
-+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++static __always_inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
+{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
-@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
+@@ -56,7 +102,29 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
*/
static inline void atomic64_sub(long i, atomic64_t *v)
{
@@ -15867,7 +18182,7 @@ index f8d273e..02f39f3 100644
/**
@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
*/
- static inline void atomic64_inc(atomic64_t *v)
+ static __always_inline void atomic64_inc(atomic64_t *v)
{
+ asm volatile(LOCK_PREFIX "incq %0\n"
+
@@ -15888,14 +18203,14 @@ index f8d273e..02f39f3 100644
+ *
+ * Atomically increments @v by 1.
+ */
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++static __always_inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
+{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
-@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v)
+@@ -96,7 +185,28 @@ static __always_inline void atomic64_inc(atomic64_t *v)
*/
- static inline void atomic64_dec(atomic64_t *v)
+ static __always_inline void atomic64_dec(atomic64_t *v)
{
- asm volatile(LOCK_PREFIX "decq %0"
+ asm volatile(LOCK_PREFIX "decq %0\n"
@@ -15917,13 +18232,13 @@ index f8d273e..02f39f3 100644
+ *
+ * Atomically decrements @v by 1.
+ */
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++static __always_inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+{
+ asm volatile(LOCK_PREFIX "decq %0\n"
: "=m" (v->counter)
: "m" (v->counter));
}
-@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v)
+@@ -111,7 +221,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
@@ -15952,7 +18267,7 @@ index f8d273e..02f39f3 100644
/**
@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
*/
- static inline long atomic64_add_return(long i, atomic64_t *v)
+ static __always_inline long atomic64_add_return(long i, atomic64_t *v)
{
+ return i + xadd_check_overflow(&v->counter, i);
+}
@@ -15964,7 +18279,7 @@ index f8d273e..02f39f3 100644
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++static __always_inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+{
return i + xadd(&v->counter, i);
}
@@ -16028,7 +18343,7 @@ index f8d273e..02f39f3 100644
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
-index 959e45b..6ea9bf6 100644
+index e51a8f8..ee075df 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,7 +57,7 @@
@@ -16135,10 +18450,10 @@ index cfe3b95..d01b118 100644
int bitpos = -1;
/*
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
-index 4fa687a..60f2d39 100644
+index 4fa687a..4ca636f 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
-@@ -6,10 +6,15 @@
+@@ -6,7 +6,7 @@
#include <uapi/asm/boot.h>
/* Physical address where kernel should be loaded. */
@@ -16147,14 +18462,6 @@ index 4fa687a..60f2d39 100644
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
-+#ifndef __ASSEMBLY__
-+extern unsigned char __LOAD_PHYSICAL_ADDR[];
-+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
-+#endif
-+
- /* Minimum kernel alignment, as a power of two */
- #ifdef CONFIG_X86_64
- #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 48f99f1..d78ebf9 100644
--- a/arch/x86/include/asm/cache.h
@@ -16175,164 +18482,6 @@ index 48f99f1..d78ebf9 100644
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_SMP
-diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
-index 1c8b50e..166bcaa 100644
---- a/arch/x86/include/asm/calling.h
-+++ b/arch/x86/include/asm/calling.h
-@@ -96,23 +96,26 @@ For 32-bit we have the following conventions - kernel is built with
- .endm
-
- .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12+\offset
-+#endif
- .if \r11
-- movq_cfi r11, 6*8+\offset
-+ movq_cfi r11, R11+\offset
- .endif
- .if \r8910
-- movq_cfi r10, 7*8+\offset
-- movq_cfi r9, 8*8+\offset
-- movq_cfi r8, 9*8+\offset
-+ movq_cfi r10, R10+\offset
-+ movq_cfi r9, R9+\offset
-+ movq_cfi r8, R8+\offset
- .endif
- .if \rax
-- movq_cfi rax, 10*8+\offset
-+ movq_cfi rax, RAX+\offset
- .endif
- .if \rcx
-- movq_cfi rcx, 11*8+\offset
-+ movq_cfi rcx, RCX+\offset
- .endif
-- movq_cfi rdx, 12*8+\offset
-- movq_cfi rsi, 13*8+\offset
-- movq_cfi rdi, 14*8+\offset
-+ movq_cfi rdx, RDX+\offset
-+ movq_cfi rsi, RSI+\offset
-+ movq_cfi rdi, RDI+\offset
- .endm
- .macro SAVE_C_REGS offset=0
- SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-@@ -131,76 +134,87 @@ For 32-bit we have the following conventions - kernel is built with
- .endm
-
- .macro SAVE_EXTRA_REGS offset=0
-- movq_cfi r15, 0*8+\offset
-- movq_cfi r14, 1*8+\offset
-- movq_cfi r13, 2*8+\offset
-- movq_cfi r12, 3*8+\offset
-- movq_cfi rbp, 4*8+\offset
-- movq_cfi rbx, 5*8+\offset
-+ movq_cfi r15, R15+\offset
-+ movq_cfi r14, R14+\offset
-+ movq_cfi r13, R13+\offset
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12+\offset
-+#endif
-+ movq_cfi rbp, RBP+\offset
-+ movq_cfi rbx, RBX+\offset
- .endm
- .macro SAVE_EXTRA_REGS_RBP offset=0
-- movq_cfi rbp, 4*8+\offset
-+ movq_cfi rbp, RBP+\offset
- .endm
-
- .macro RESTORE_EXTRA_REGS offset=0
-- movq_cfi_restore 0*8+\offset, r15
-- movq_cfi_restore 1*8+\offset, r14
-- movq_cfi_restore 2*8+\offset, r13
-- movq_cfi_restore 3*8+\offset, r12
-- movq_cfi_restore 4*8+\offset, rbp
-- movq_cfi_restore 5*8+\offset, rbx
-+ movq_cfi_restore R15+\offset, r15
-+ movq_cfi_restore R14+\offset, r14
-+ movq_cfi_restore R13+\offset, r13
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi_restore R12+\offset, r12
-+#endif
-+ movq_cfi_restore RBP+\offset, rbp
-+ movq_cfi_restore RBX+\offset, rbx
- .endm
-
- .macro ZERO_EXTRA_REGS
- xorl %r15d, %r15d
- xorl %r14d, %r14d
- xorl %r13d, %r13d
-+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
- xorl %r12d, %r12d
-+#endif
- xorl %ebp, %ebp
- xorl %ebx, %ebx
- .endm
-
-- .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
-+ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ .if \rstor_r12
-+ movq_cfi_restore R12, r12
-+ .endif
-+#endif
- .if \rstor_r11
-- movq_cfi_restore 6*8, r11
-+ movq_cfi_restore R11, r11
- .endif
- .if \rstor_r8910
-- movq_cfi_restore 7*8, r10
-- movq_cfi_restore 8*8, r9
-- movq_cfi_restore 9*8, r8
-+ movq_cfi_restore R10, r10
-+ movq_cfi_restore R9, r9
-+ movq_cfi_restore R8, r8
- .endif
- .if \rstor_rax
-- movq_cfi_restore 10*8, rax
-+ movq_cfi_restore RAX, rax
- .endif
- .if \rstor_rcx
-- movq_cfi_restore 11*8, rcx
-+ movq_cfi_restore RCX, rcx
- .endif
- .if \rstor_rdx
-- movq_cfi_restore 12*8, rdx
-+ movq_cfi_restore RDX, rdx
- .endif
-- movq_cfi_restore 13*8, rsi
-- movq_cfi_restore 14*8, rdi
-+ movq_cfi_restore RSI, rsi
-+ movq_cfi_restore RDI, rdi
- .endm
- .macro RESTORE_C_REGS
-- RESTORE_C_REGS_HELPER 1,1,1,1,1
-+ RESTORE_C_REGS_HELPER 1,1,1,1,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RAX
-- RESTORE_C_REGS_HELPER 0,1,1,1,1
-+ RESTORE_C_REGS_HELPER 0,1,1,1,1,0
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RCX
-- RESTORE_C_REGS_HELPER 1,0,1,1,1
-+ RESTORE_C_REGS_HELPER 1,0,1,1,1,0
- .endm
- .macro RESTORE_C_REGS_EXCEPT_R11
-- RESTORE_C_REGS_HELPER 1,1,0,1,1
-+ RESTORE_C_REGS_HELPER 1,1,0,1,1,1
- .endm
- .macro RESTORE_C_REGS_EXCEPT_RCX_R11
-- RESTORE_C_REGS_HELPER 1,0,0,1,1
-+ RESTORE_C_REGS_HELPER 1,0,0,1,1,1
- .endm
- .macro RESTORE_RSI_RDI
-- RESTORE_C_REGS_HELPER 0,0,0,0,0
-+ RESTORE_C_REGS_HELPER 0,0,0,0,0,1
- .endm
- .macro RESTORE_RSI_RDI_RDX
-- RESTORE_C_REGS_HELPER 0,0,0,0,1
-+ RESTORE_C_REGS_HELPER 0,0,0,0,1,1
- .endm
-
- .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f50de69..2b0a458 100644
--- a/arch/x86/include/asm/checksum_32.h
@@ -16371,10 +18520,10 @@ index f50de69..2b0a458 100644
clac();
return ret;
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
-index 99c105d7..2f667ac 100644
+index ad19841..0784041 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
-@@ -16,8 +16,12 @@ extern void __cmpxchg_wrong_size(void)
+@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
__compiletime_error("Bad argument size for xadd");
@@ -16387,7 +18536,7 @@ index 99c105d7..2f667ac 100644
/*
* Constants for operation sizes. On 32-bit, the 64-bit size it set to
-@@ -69,6 +73,38 @@ extern void __add_wrong_size(void)
+@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
__ret; \
})
@@ -16426,7 +18575,7 @@ index 99c105d7..2f667ac 100644
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
* Since this is generally used to protect other memory information, we
-@@ -167,6 +203,9 @@ extern void __add_wrong_size(void)
+@@ -165,6 +201,9 @@ extern void __add_wrong_size(void)
#define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
@@ -16527,7 +18676,7 @@ index 3d6606f..300641d 100644
"6:\n"
".previous\n"
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
-index a0bf89f..56f0b2a 100644
+index 4e10d73..7319a47 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -4,6 +4,7 @@
@@ -16636,8 +18785,8 @@ index a0bf89f..56f0b2a 100644
}
/* This intentionally ignores lm, since 32-bit apps don't have that field. */
-@@ -295,7 +308,7 @@ static inline void load_LDT(mm_context_t *pc)
- preempt_enable();
+@@ -280,7 +293,7 @@ static inline void clear_LDT(void)
+ set_ldt(NULL, 0);
}
-static inline unsigned long get_desc_base(const struct desc_struct *desc)
@@ -16645,7 +18794,7 @@ index a0bf89f..56f0b2a 100644
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}
-@@ -319,7 +332,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+@@ -304,7 +317,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
}
#ifdef CONFIG_X86_64
@@ -16654,7 +18803,7 @@ index a0bf89f..56f0b2a 100644
{
gate_desc s;
-@@ -329,14 +342,14 @@ static inline void set_nmi_gate(int gate, void *addr)
+@@ -314,14 +327,14 @@ static inline void set_nmi_gate(int gate, void *addr)
#endif
#ifdef CONFIG_TRACING
@@ -16672,7 +18821,7 @@ index a0bf89f..56f0b2a 100644
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
-@@ -356,7 +369,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+@@ -341,7 +354,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
#endif
@@ -16681,7 +18830,7 @@ index a0bf89f..56f0b2a 100644
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
-@@ -379,14 +392,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+@@ -364,14 +377,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
#define set_intr_gate_notrace(n, addr) \
do { \
BUG_ON((unsigned)n > 0xFF); \
@@ -16698,7 +18847,7 @@ index a0bf89f..56f0b2a 100644
0, 0, __KERNEL_CS); \
} while (0)
-@@ -414,19 +427,19 @@ static inline void alloc_system_vector(int vector)
+@@ -399,19 +412,19 @@ static inline void alloc_system_vector(int vector)
/*
* This routine sets up an interrupt gate at directory privilege level 3.
*/
@@ -16721,7 +18870,7 @@ index a0bf89f..56f0b2a 100644
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-@@ -435,16 +448,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+@@ -420,16 +433,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
BUG_ON((unsigned)n > 0xFF);
@@ -16741,7 +18890,7 @@ index a0bf89f..56f0b2a 100644
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
-@@ -516,4 +529,17 @@ static inline void load_current_idt(void)
+@@ -501,4 +514,17 @@ static inline void load_current_idt(void)
else
load_idt((const struct desc_ptr *)&idt_descr);
}
@@ -16907,11 +19056,11 @@ index 1c7eefe..d0e4702 100644
}
};
-diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
-index da5e967..ab07eec 100644
---- a/arch/x86/include/asm/fpu-internal.h
-+++ b/arch/x86/include/asm/fpu-internal.h
-@@ -151,8 +151,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 3c3550c..995858d 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -97,8 +97,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...) \
({ \
int err; \
@@ -16924,7 +19073,7 @@ index da5e967..ab07eec 100644
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
-@@ -161,6 +164,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
+@@ -107,6 +110,7 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err), output \
: "0"(0), input); \
@@ -16932,15 +19081,124 @@ index da5e967..ab07eec 100644
err; \
})
-@@ -327,7 +331,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
+@@ -186,9 +190,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ {
+ if (config_enabled(CONFIG_X86_32))
+- asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
++ asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+- asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
++ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else {
+ /* Using "rex64; fxsave %0" is broken because, if the memory
+ * operand uses any extended registers for addressing, a second
+@@ -212,8 +216,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ * registers.
+ */
+ asm volatile( "rex64/fxsave (%[fx])"
+- : "=m" (fpu->state.fxsave)
+- : [fx] "R" (&fpu->state.fxsave));
++ : "=m" (fpu->state->fxsave)
++ : [fx] "R" (&fpu->state->fxsave));
+ }
+ }
+
+@@ -388,12 +392,16 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+ if (unlikely(err))
+ return -EFAULT;
+
++ pax_open_userland();
+ __asm__ __volatile__(ASM_STAC "\n"
+- "1:"XSAVE"\n"
++ "1:"
++ __copyuser_seg
++ XSAVE"\n"
+ "2: " ASM_CLAC "\n"
+ xstate_fault(err)
+ : "D" (buf), "a" (-1), "d" (-1), "0" (err)
+ : "memory");
++ pax_close_userland();
+ return err;
+ }
+
+@@ -402,17 +410,21 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+ */
+ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+ {
+- struct xregs_state *xstate = ((__force struct xregs_state *)buf);
++ struct xregs_state *xstate = ((__force_kernel struct xregs_state *)buf);
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+ int err = 0;
+
++ pax_open_userland();
+ __asm__ __volatile__(ASM_STAC "\n"
+- "1:"XRSTOR"\n"
++ "1:"
++ __copyuser_seg
++ XRSTOR"\n"
+ "2: " ASM_CLAC "\n"
+ xstate_fault(err)
+ : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
+ : "memory"); /* memory required? */
++ pax_close_userland();
+ return err;
+ }
+
+@@ -429,7 +441,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+ {
+ if (likely(use_xsave())) {
+- copy_xregs_to_kernel(&fpu->state.xsave);
++ copy_xregs_to_kernel(&fpu->state->xsave);
+ return 1;
+ }
+
+@@ -442,7 +454,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+ * Legacy FPU register saving, FNSAVE always clears FPU registers,
+ * so we have to mark them inactive:
+ */
+- asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
++ asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state->fsave));
+
+ return 0;
+ }
+@@ -471,7 +483,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
"fnclex\n\t"
"emms\n\t"
"fildl %P[addr]" /* set F?P to defined value */
-- : : [addr] "m" (tsk->thread.fpu.has_fpu));
+- : : [addr] "m" (fpstate));
+ : : [addr] "m" (cpu_tss[raw_smp_processor_id()].x86_tss.sp0));
}
- return fpu_restore_checking(&tsk->thread.fpu);
+ __copy_kernel_to_fpregs(fpstate);
+@@ -643,7 +655,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+ static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+ {
+ if (fpu_switch.preload)
+- copy_kernel_to_fpregs(&new_fpu->state);
++ copy_kernel_to_fpregs(new_fpu->state);
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
+index c49c517..55ff1d0 100644
+--- a/arch/x86/include/asm/fpu/types.h
++++ b/arch/x86/include/asm/fpu/types.h
+@@ -287,10 +287,9 @@ struct fpu {
+ * logic, which unconditionally saves/restores all FPU state
+ * across context switches. (if FPU state exists.)
+ */
+- union fpregs_state state;
++ union fpregs_state *state;
+ /*
+- * WARNING: 'state' is dynamically-sized. Do not put
+- * anything after it here.
++ * WARNING: 'state' is dynamically-sized.
+ */
+ };
+
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f54..e290c08 100644
--- a/arch/x86/include/asm/futex.h
@@ -17006,10 +19264,10 @@ index b4c1f54..e290c08 100644
pagefault_enable();
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
-index e9571dd..df5f542 100644
+index 6615032..9c233be 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
-@@ -160,8 +160,8 @@ static inline void unlock_vector_lock(void) {}
+@@ -158,8 +158,8 @@ static inline void unlock_vector_lock(void) {}
#endif /* CONFIG_X86_LOCAL_APIC */
/* Statistics */
@@ -17018,8 +19276,8 @@ index e9571dd..df5f542 100644
+extern atomic_unchecked_t irq_err_count;
+extern atomic_unchecked_t irq_mis_count;
- /* EISA */
- extern void eisa_set_level_irq(unsigned int irq);
+ extern void elcr_set_level_irq(unsigned int irq);
+
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index ccffa53..3c90c87 100644
--- a/arch/x86/include/asm/i8259.h
@@ -17034,10 +19292,18 @@ index ccffa53..3c90c87 100644
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
-index 34a5b93..27e40a6 100644
+index cc9c61b..7b17f40 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
-@@ -52,12 +52,12 @@ static inline void name(type val, volatile void __iomem *addr) \
+@@ -42,6 +42,7 @@
+ #include <asm/page.h>
+ #include <asm/early_ioremap.h>
+ #include <asm/pgtable_types.h>
++#include <asm/processor.h>
+
+ #define build_mmio_read(name, size, type, reg, barrier) \
+ static inline type name(const volatile void __iomem *addr) \
+@@ -54,12 +55,12 @@ static inline void name(type val, volatile void __iomem *addr) \
"m" (*(volatile type __force *)addr) barrier); }
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
@@ -17054,7 +19320,7 @@ index 34a5b93..27e40a6 100644
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
-@@ -113,7 +113,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
+@@ -115,7 +116,7 @@ build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
* this function
*/
@@ -17063,7 +19329,7 @@ index 34a5b93..27e40a6 100644
{
return __pa(address);
}
-@@ -189,7 +189,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+@@ -192,7 +193,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
return ioremap_nocache(offset, size);
}
@@ -17072,9 +19338,9 @@ index 34a5b93..27e40a6 100644
extern void set_iounmap_nonlazy(void);
-@@ -199,6 +199,17 @@ extern void set_iounmap_nonlazy(void);
+@@ -200,6 +201,17 @@ extern void set_iounmap_nonlazy(void);
- #include <linux/vmalloc.h>
+ #include <asm-generic/iomap.h>
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
@@ -17330,19 +19596,10 @@ index 0000000..2bfd3ba
+
+#endif /* X86_MMAN_H */
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
-index 09b9620..923aecd 100644
+index 364d274..e51b4bc 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
-@@ -9,7 +9,7 @@
- * we put the segment information here.
- */
- typedef struct {
-- void *ldt;
-+ struct desc_struct *ldt;
- int size;
-
- #ifdef CONFIG_X86_64
-@@ -18,7 +18,19 @@ typedef struct {
+@@ -17,7 +17,19 @@ typedef struct {
#endif
struct mutex lock;
@@ -17364,10 +19621,19 @@ index 09b9620..923aecd 100644
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
-index e997f70..5d819f7 100644
+index 984abfe..f9bac8b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
-@@ -42,6 +42,20 @@ void destroy_context(struct mm_struct *mm);
+@@ -45,7 +45,7 @@ struct ldt_struct {
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+- int size;
++ unsigned int size;
+ };
+
+ static inline void load_mm_ldt(struct mm_struct *mm)
+@@ -86,6 +86,20 @@ void destroy_context(struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
@@ -17388,7 +19654,7 @@ index e997f70..5d819f7 100644
#ifdef CONFIG_SMP
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-@@ -52,16 +66,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -96,16 +110,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned cpu = smp_processor_id();
@@ -17448,10 +19714,10 @@ index e997f70..5d819f7 100644
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
/* Stop flush ipis for the previous mm */
-@@ -84,9 +141,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -128,9 +185,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
+ if (!(__supported_pte_mask & _PAGE_NX)) {
@@ -17517,7 +19783,7 @@ index e997f70..5d819f7 100644
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-@@ -103,13 +218,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+@@ -147,13 +262,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
@@ -17528,7 +19794,7 @@ index e997f70..5d819f7 100644
+#endif
+
load_mm_cr4(next);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
+
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
+ if (!(__supported_pte_mask & _PAGE_NX))
@@ -17725,7 +19991,7 @@ index b3bebf9..cb419e7 100644
#define __phys_reloc_hide(x) (x)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
-index 8957810..f34efb4 100644
+index d143bfa..30d1f41 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
@@ -17777,8 +20043,8 @@ index 8957810..f34efb4 100644
+
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
- static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
-@@ -906,7 +933,7 @@ extern void default_banner(void);
+ #ifdef CONFIG_QUEUED_SPINLOCKS
+@@ -933,7 +960,7 @@ extern void default_banner(void);
#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
@@ -17787,7 +20053,7 @@ index 8957810..f34efb4 100644
#endif
#define INTERRUPT_RETURN \
-@@ -976,6 +1003,21 @@ extern void default_banner(void);
+@@ -1003,6 +1030,21 @@ extern void default_banner(void);
PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
CLBR_NONE, \
jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
@@ -17810,7 +20076,7 @@ index 8957810..f34efb4 100644
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
-index f7b0b5c..cdd33f9 100644
+index a6b8f9f..fd61ef7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -84,7 +84,7 @@ struct pv_init_ops {
@@ -17838,7 +20104,7 @@ index f7b0b5c..cdd33f9 100644
struct pv_cpu_ops {
/* hooks for various privileged instructions */
-@@ -192,7 +192,7 @@ struct pv_cpu_ops {
+@@ -193,7 +193,7 @@ struct pv_cpu_ops {
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
@@ -17847,7 +20113,7 @@ index f7b0b5c..cdd33f9 100644
struct pv_irq_ops {
/*
-@@ -215,7 +215,7 @@ struct pv_irq_ops {
+@@ -216,7 +216,7 @@ struct pv_irq_ops {
#ifdef CONFIG_X86_64
void (*adjust_exception_frame)(void);
#endif
@@ -17856,7 +20122,7 @@ index f7b0b5c..cdd33f9 100644
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
-@@ -223,7 +223,7 @@ struct pv_apic_ops {
+@@ -224,7 +224,7 @@ struct pv_apic_ops {
unsigned long start_eip,
unsigned long start_esp);
#endif
@@ -17865,7 +20131,7 @@ index f7b0b5c..cdd33f9 100644
struct pv_mmu_ops {
unsigned long (*read_cr2)(void);
-@@ -313,6 +313,7 @@ struct pv_mmu_ops {
+@@ -314,6 +314,7 @@ struct pv_mmu_ops {
struct paravirt_callee_save make_pud;
void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
@@ -17873,7 +20139,7 @@ index f7b0b5c..cdd33f9 100644
#endif /* CONFIG_PGTABLE_LEVELS == 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-@@ -324,7 +325,13 @@ struct pv_mmu_ops {
+@@ -325,7 +326,13 @@ struct pv_mmu_ops {
an mfn. We can tell which is which from the index. */
void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
@@ -17888,10 +20154,10 @@ index f7b0b5c..cdd33f9 100644
struct arch_spinlock;
#ifdef CONFIG_SMP
-@@ -336,11 +343,14 @@ typedef u16 __ticket_t;
- struct pv_lock_ops {
+@@ -347,11 +354,14 @@ struct pv_lock_ops {
struct paravirt_callee_save lock_spinning;
void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+ #endif /* !CONFIG_QUEUED_SPINLOCKS */
-};
+} __no_randomize_layout;
@@ -17905,7 +20171,7 @@ index f7b0b5c..cdd33f9 100644
struct paravirt_patch_template {
struct pv_init_ops pv_init_ops;
struct pv_time_ops pv_time_ops;
-@@ -349,7 +359,7 @@ struct paravirt_patch_template {
+@@ -360,7 +370,7 @@ struct paravirt_patch_template {
struct pv_apic_ops pv_apic_ops;
struct pv_mmu_ops pv_mmu_ops;
struct pv_lock_ops pv_lock_ops;
@@ -17969,10 +20235,17 @@ index bf7f8b5..ca5799d 100644
{
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
-index fd74a11..35fd5af 100644
+index fd74a11..98bd591 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
-@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+@@ -13,12 +13,16 @@
+ */
+ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+ {
++ pax_open_kernel();
+ *ptep = pte;
++ pax_close_kernel();
+ }
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -17982,11 +20255,66 @@ index fd74a11..35fd5af 100644
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -34,13 +38,20 @@ static inline void native_pmd_clear(pmd_t *pmdp)
+ static inline void native_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *xp)
+ {
++ pax_open_kernel();
+ *xp = native_make_pte(0);
++ pax_close_kernel();
+ }
+
+ #ifdef CONFIG_SMP
+ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+ {
+- return __pte(xchg(&xp->pte_low, 0));
++ pte_t pte;
++
++ pax_open_kernel();
++ pte = __pte(xchg(&xp->pte_low, 0));
++ pax_close_kernel();
++ return pte;
+ }
+ #else
+ #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
+@@ -49,7 +60,12 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+ #ifdef CONFIG_SMP
+ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+ {
+- return __pmd(xchg((pmdval_t *)xp, 0));
++ pmd_t pmd;
++
++ pax_open_kernel();
++ pmd = __pmd(xchg((pmdval_t *)xp, 0));
++ pax_close_kernel();
++ return pmd;
+ }
+ #else
+ #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
-index cdaa58c..e61122b 100644
+index cdaa58c..4038692 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
-@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -26,9 +26,11 @@
+ */
+ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ {
++ pax_open_kernel();
+ ptep->pte_high = pte.pte_high;
+ smp_wmb();
+ ptep->pte_low = pte.pte_low;
++ pax_close_kernel();
+ }
+
+ #define pmd_read_atomic pmd_read_atomic
+@@ -87,17 +89,23 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
++ pax_close_kernel();
+ }
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18003,8 +20331,55 @@ index cdaa58c..e61122b 100644
}
/*
+@@ -108,17 +116,22 @@ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+ {
++ pax_open_kernel();
+ ptep->pte_low = 0;
+ smp_wmb();
+ ptep->pte_high = 0;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+ {
+ u32 *tmp = (u32 *)pmd;
++
++ pax_open_kernel();
+ *tmp = 0;
+ smp_wmb();
+ *(tmp + 1) = 0;
++ pax_close_kernel();
+ }
+
+ static inline void pud_clear(pud_t *pudp)
+@@ -143,9 +156,11 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
+ pte_t res;
+
+ /* xchg acts as a barrier before the setting of the high bits */
++ pax_open_kernel();
+ res.pte_low = xchg(&ptep->pte_low, 0);
+ res.pte_high = ptep->pte_high;
+ ptep->pte_high = 0;
++ pax_close_kernel();
+
+ return res;
+ }
+@@ -166,9 +181,11 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
+ union split_pmd res, *orig = (union split_pmd *)pmdp;
+
+ /* xchg acts as a barrier before setting of the high bits */
++ pax_open_kernel();
+ res.pmd_low = xchg(&orig->pmd_low, 0);
+ res.pmd_high = orig->pmd_high;
+ orig->pmd_high = 0;
++ pax_close_kernel();
+
+ return res.pmd;
+ }
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
-index fe57e7a..0573d42 100644
+index 867da5b..7ec083d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -47,6 +47,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
@@ -18112,7 +20487,7 @@ index fe57e7a..0573d42 100644
}
static inline pte_t pte_mkdirty(pte_t pte)
-@@ -420,6 +487,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+@@ -426,6 +493,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
#endif
#ifndef __ASSEMBLY__
@@ -18129,7 +20504,7 @@ index fe57e7a..0573d42 100644
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
-@@ -571,7 +648,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
+@@ -577,7 +654,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18138,7 +20513,7 @@ index fe57e7a..0573d42 100644
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-@@ -611,7 +688,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+@@ -617,7 +694,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
@@ -18147,7 +20522,7 @@ index fe57e7a..0573d42 100644
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
-@@ -626,7 +703,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+@@ -632,7 +709,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
static inline int pgd_bad(pgd_t pgd)
{
@@ -18156,7 +20531,7 @@ index fe57e7a..0573d42 100644
}
static inline int pgd_none(pgd_t pgd)
-@@ -649,7 +726,12 @@ static inline int pgd_none(pgd_t pgd)
+@@ -655,7 +732,12 @@ static inline int pgd_none(pgd_t pgd)
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
@@ -18170,7 +20545,7 @@ index fe57e7a..0573d42 100644
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
-@@ -660,6 +742,25 @@ static inline int pgd_none(pgd_t pgd)
+@@ -666,6 +748,25 @@ static inline int pgd_none(pgd_t pgd)
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
@@ -18196,7 +20571,7 @@ index fe57e7a..0573d42 100644
#ifndef __ASSEMBLY__
extern int direct_gbpages;
-@@ -826,11 +927,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+@@ -832,11 +933,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
@@ -18224,7 +20599,7 @@ index fe57e7a..0573d42 100644
static inline int page_level_shift(enum pg_level level)
{
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
-index b6c0b40..3535d47 100644
+index b6c0b40..7b497ea 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -25,9 +25,6 @@
@@ -18250,15 +20625,7 @@ index b6c0b40..3535d47 100644
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
-@@ -59,12 +62,17 @@ void paging_init(void);
- /* Clear a kernel PTE and flush it from the TLB */
- #define kpte_clear_flush(ptep, vaddr) \
- do { \
-+ pax_open_kernel(); \
- pte_clear(&init_mm, (vaddr), (ptep)); \
-+ pax_close_kernel(); \
- __flush_tlb_one((vaddr)); \
- } while (0)
+@@ -65,6 +68,9 @@ do { \
#endif /* !__ASSEMBLY__ */
@@ -18269,7 +20636,7 @@ index b6c0b40..3535d47 100644
* kern_addr_valid() is (1) for FLATMEM and (0) for
* SPARSEMEM and DISCONTIGMEM
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
-index 9fb2f2b..b04b4bf 100644
+index 9fb2f2b..8e18c70 100644
--- a/arch/x86/include/asm/pgtable_32_types.h
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -8,7 +8,7 @@
@@ -18281,7 +20648,7 @@ index 9fb2f2b..b04b4bf 100644
# define PMD_MASK (~(PMD_SIZE - 1))
#else
# include <asm/pgtable-2level_types.h>
-@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+@@ -46,6 +46,28 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
@@ -18289,10 +20656,19 @@ index 9fb2f2b..b04b4bf 100644
+#ifndef __ASSEMBLY__
+extern unsigned char MODULES_EXEC_VADDR[];
+extern unsigned char MODULES_EXEC_END[];
++
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++static inline unsigned long __intentional_overflow(-1) ktla_ktva(unsigned long addr)
++{
++ return addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET;
++
++}
++static inline unsigned long __intentional_overflow(-1) ktva_ktla(unsigned long addr)
++{
++ return addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET;
++}
+#endif
-+#include <asm/boot.h>
-+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
-+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
+#else
+#define ktla_ktva(addr) (addr)
+#define ktva_ktla(addr) (addr)
@@ -18302,10 +20678,10 @@ index 9fb2f2b..b04b4bf 100644
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
-index 2ee7811..55aca24 100644
+index 2ee7811..c985cfd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
-@@ -16,11 +16,16 @@
+@@ -16,11 +16,17 @@
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
@@ -18319,13 +20695,31 @@ index 2ee7811..55aca24 100644
-extern pte_t level1_fixmap_pgt[512];
-extern pgd_t init_level4_pgt[];
+extern pmd_t level2_ident_pgt[2][512];
++extern pte_t level1_modules_pgt[4][512];
+extern pte_t level1_fixmap_pgt[3][512];
+extern pte_t level1_vsyscall_pgt[512];
+extern pgd_t init_level4_pgt[512];
#define swapper_pg_dir init_level4_pgt
-@@ -62,7 +67,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -47,12 +53,16 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
+ static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+ {
++ pax_open_kernel();
+ *ptep = native_make_pte(0);
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte(pte_t *ptep, pte_t pte)
+ {
++ pax_open_kernel();
+ *ptep = pte;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+@@ -62,7 +72,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
@@ -18335,7 +20729,35 @@ index 2ee7811..55aca24 100644
}
static inline void native_pmd_clear(pmd_t *pmd)
-@@ -98,7 +105,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+@@ -73,7 +85,12 @@ static inline void native_pmd_clear(pmd_t *pmd)
+ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+ {
+ #ifdef CONFIG_SMP
+- return native_make_pte(xchg(&xp->pte, 0));
++ pte_t pte;
++
++ pax_open_kernel();
++ pte = native_make_pte(xchg(&xp->pte, 0));
++ pax_close_kernel();
++ return pte;
+ #else
+ /* native_local_ptep_get_and_clear,
+ but duplicated because of cyclic dependency */
+@@ -86,7 +103,12 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
+ {
+ #ifdef CONFIG_SMP
+- return native_make_pmd(xchg(&xp->pmd, 0));
++ pmd_t pmd;
++
++ pax_open_kernel();
++ pmd = native_make_pmd(xchg(&xp->pmd, 0));
++ pax_close_kernel();
++ return pmd;
+ #else
+ /* native_local_pmdp_get_and_clear,
+ but duplicated because of cyclic dependency */
+@@ -98,7 +120,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
@@ -18345,7 +20767,7 @@ index 2ee7811..55aca24 100644
}
static inline void native_pud_clear(pud_t *pud)
-@@ -108,6 +117,13 @@ static inline void native_pud_clear(pud_t *pud)
+@@ -108,6 +132,13 @@ static inline void native_pud_clear(pud_t *pud)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
@@ -18381,7 +20803,7 @@ index e6844df..432b56e 100644
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
-index 78f0c8c..4424bb0 100644
+index 13f310b..f0ef42e 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -85,8 +85,10 @@
@@ -18469,7 +20891,7 @@ index 78f0c8c..4424bb0 100644
#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
-index 8f327184..368fb29 100644
+index dca71714..919d4e1 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -84,7 +84,7 @@ static __always_inline void __preempt_count_sub(int val)
@@ -18482,10 +20904,19 @@ index 8f327184..368fb29 100644
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
-index 23ba676..6584489 100644
+index 944f178..37097a3 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
-@@ -130,7 +130,7 @@ struct cpuinfo_x86 {
+@@ -102,7 +102,7 @@ struct cpuinfo_x86 {
+ int x86_tlbsize;
+ #endif
+ __u8 x86_virt_bits;
+- __u8 x86_phys_bits;
++ __u8 x86_phys_bits __intentional_overflow(-1);
+ /* CPUID returned core id bits: */
+ __u8 x86_coreid_bits;
+ /* Max extended CPUID function supported: */
+@@ -136,7 +136,7 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
@@ -18494,7 +20925,7 @@ index 23ba676..6584489 100644
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
-@@ -201,9 +201,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
+@@ -206,9 +206,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
@@ -18517,16 +20948,20 @@ index 23ba676..6584489 100644
}
#ifdef CONFIG_X86_32
-@@ -300,7 +312,7 @@ struct tss_struct {
+@@ -305,11 +317,9 @@ struct tss_struct {
} ____cacheline_aligned;
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss);
+extern struct tss_struct cpu_tss[NR_CPUS];
- #ifdef CONFIG_X86_32
+-#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
-@@ -500,6 +512,7 @@ struct thread_struct {
+-#endif
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -381,6 +391,7 @@ struct thread_struct {
unsigned short ds;
unsigned short fsindex;
unsigned short gsindex;
@@ -18534,7 +20969,7 @@ index 23ba676..6584489 100644
#endif
#ifdef CONFIG_X86_32
unsigned long ip;
-@@ -585,10 +598,10 @@ static inline void native_swapgs(void)
+@@ -463,10 +474,10 @@ static inline void native_swapgs(void)
#endif
}
@@ -18547,7 +20982,15 @@ index 23ba676..6584489 100644
#else
/* sp0 on x86_32 is special in and around vm86 mode. */
return this_cpu_read_stable(cpu_current_top_of_stack);
-@@ -837,8 +850,15 @@ static inline void spin_lock_prefetch(const void *x)
+@@ -709,20 +720,30 @@ static inline void spin_lock_prefetch(const void *x)
+ #define TOP_OF_INIT_STACK ((unsigned long)&init_stack + sizeof(init_stack) - \
+ TOP_OF_KERNEL_STACK_PADDING)
+
++extern union fpregs_state init_fpregs_state;
++
+ #ifdef CONFIG_X86_32
+ /*
+ * User space process size: 3GB (default).
*/
#define TASK_SIZE PAGE_OFFSET
#define TASK_SIZE_MAX TASK_SIZE
@@ -18564,7 +21007,14 @@ index 23ba676..6584489 100644
#define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \
-@@ -859,12 +879,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
++ .fpu.state = &init_fpregs_state, \
+ }
+
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -737,12 +758,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* "struct pt_regs" is possible, but they may contain the
* completely wrong values.
*/
@@ -18578,7 +21028,7 @@ index 23ba676..6584489 100644
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
-@@ -878,13 +893,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+@@ -756,13 +772,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
* particular problem by preventing anything from being mapped
* at the maximum canonical address.
*/
@@ -18594,7 +21044,17 @@ index 23ba676..6584489 100644
#define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
-@@ -918,6 +933,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+@@ -773,7 +789,8 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = TOP_OF_INIT_STACK \
++ .sp0 = TOP_OF_INIT_STACK, \
++ .fpu.state = &init_fpregs_state, \
+ }
+
+ /*
+@@ -796,6 +813,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
*/
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
@@ -18605,7 +21065,7 @@ index 23ba676..6584489 100644
#define KSTK_EIP(task) (task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
-@@ -962,7 +981,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+@@ -841,7 +862,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
return 0;
}
@@ -18614,7 +21074,7 @@ index 23ba676..6584489 100644
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
-@@ -972,6 +991,6 @@ bool xen_set_default_idle(void);
+@@ -851,6 +872,6 @@ bool xen_set_default_idle(void);
#define xen_set_default_idle 0
#endif
@@ -18623,9 +21083,35 @@ index 23ba676..6584489 100644
void df_debug(struct pt_regs *regs, long error_code);
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
-index 5fabf13..7388158 100644
+index 5fabf13..7f90572 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
+@@ -21,10 +21,10 @@ struct pt_regs {
+ unsigned long fs;
+ unsigned long gs;
+ unsigned long orig_ax;
+- unsigned long ip;
++ unsigned long ip __intentional_overflow(-1);
+ unsigned long cs;
+ unsigned long flags;
+- unsigned long sp;
++ unsigned long sp __intentional_overflow(-1);
+ unsigned long ss;
+ };
+
+@@ -57,10 +57,10 @@ struct pt_regs {
+ */
+ unsigned long orig_ax;
+ /* Return frame for iretq */
+- unsigned long ip;
++ unsigned long ip __intentional_overflow(-1);
+ unsigned long cs;
+ unsigned long flags;
+- unsigned long sp;
++ unsigned long sp __intentional_overflow(-1);
+ unsigned long ss;
+ /* top of stack page */
+ };
@@ -125,15 +125,16 @@ static inline int v8086_mode(struct pt_regs *regs)
#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
@@ -19106,7 +21592,7 @@ index ba665eb..0f72938 100644
static __always_inline void clac(void)
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
-index 17a8dce..79f7280 100644
+index 222a6a3..839da8d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -35,7 +35,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
@@ -19116,9 +21602,9 @@ index 17a8dce..79f7280 100644
-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
- static inline struct cpumask *cpu_sibling_mask(int cpu)
+ static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
-@@ -78,7 +78,7 @@ struct smp_ops {
+@@ -68,7 +68,7 @@ struct smp_ops {
void (*send_call_func_ipi)(const struct cpumask *mask);
void (*send_call_func_single_ipi)(int cpu);
@@ -19127,7 +21613,7 @@ index 17a8dce..79f7280 100644
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);
-@@ -192,14 +192,8 @@ extern unsigned disabled_cpus;
+@@ -182,14 +182,8 @@ extern unsigned disabled_cpus;
extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
@@ -19145,10 +21631,10 @@ index 17a8dce..79f7280 100644
#endif
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
-index 6a99859..03cb807 100644
+index c2e00bb..a10266e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
-@@ -47,7 +47,7 @@
+@@ -49,7 +49,7 @@
* head_32 for boot CPU and setup_per_cpu_areas() for others.
*/
#define GDT_STACK_CANARY_INIT \
@@ -19157,7 +21643,7 @@ index 6a99859..03cb807 100644
/*
* Initialize the stackprotector canary value.
-@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
+@@ -114,7 +114,7 @@ static inline void setup_stack_canary_segment(int cpu)
static inline void load_stack_canary_segment(void)
{
@@ -19220,39 +21706,10 @@ index 70bbe39..4ae2bd4 100644
void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
-index 751bf4b..3cc39f1 100644
+index d7f3b3b..3cc39f1 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
-@@ -79,12 +79,12 @@ do { \
- #else /* CONFIG_X86_32 */
-
- /* frame pointer must be last for get_wchan */
--#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
--#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
-+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
-
- #define __EXTRA_CLOBBER \
- , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-- "r12", "r13", "r14", "r15", "flags"
-+ "r12", "r13", "r14", "r15"
-
- #ifdef CONFIG_CC_STACKPROTECTOR
- #define __switch_canary \
-@@ -100,11 +100,7 @@ do { \
- #define __switch_canary_iparam
- #endif /* CC_STACKPROTECTOR */
-
--/*
-- * There is no need to save or restore flags, because flags are always
-- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
-- * has no effect.
-- */
-+/* Save restore flags to clear handle leaking NT */
- #define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
- "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
-@@ -112,7 +108,7 @@ do { \
+@@ -108,7 +108,7 @@ do { \
"call __switch_to\n\t" \
"movq "__percpu_arg([current_task])",%%rsi\n\t" \
__switch_canary \
@@ -19261,7 +21718,7 @@ index 751bf4b..3cc39f1 100644
"movq %%rax,%%rdi\n\t" \
"testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
-@@ -123,7 +119,7 @@ do { \
+@@ -119,7 +119,7 @@ do { \
[threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
@@ -19270,8 +21727,32 @@ index 751bf4b..3cc39f1 100644
[current_task] "m" (current_task) \
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
+diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
+index 82c34ee..940fa40 100644
+--- a/arch/x86/include/asm/sys_ia32.h
++++ b/arch/x86/include/asm/sys_ia32.h
+@@ -20,8 +20,8 @@
+ #include <asm/ia32.h>
+
+ /* ia32/sys_ia32.c */
+-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
+-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
++asmlinkage long sys32_truncate64(const char __user *, unsigned int, unsigned int);
++asmlinkage long sys32_ftruncate64(unsigned int, unsigned int, unsigned int);
+
+ asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
+ asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
+@@ -42,7 +42,7 @@ long sys32_vm86_warning(void);
+ asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
+ asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
+ unsigned, unsigned, int);
+-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
++asmlinkage long sys32_fadvise64(int, unsigned, unsigned, int, int);
+ asmlinkage long sys32_fallocate(int, int, unsigned,
+ unsigned, unsigned, unsigned);
+
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
-index b4bdec3..e8af9bc 100644
+index 225ee54..fae4566 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -36,7 +36,7 @@
@@ -19352,9 +21833,9 @@ index b4bdec3..e8af9bc 100644
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
-@@ -179,9 +180,11 @@ struct thread_info {
-
- DECLARE_PER_CPU(unsigned long, kernel_stack);
+@@ -177,9 +178,11 @@ struct thread_info {
+ */
+ #ifndef __ASSEMBLY__
+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
+
@@ -19365,17 +21846,23 @@ index b4bdec3..e8af9bc 100644
}
static inline unsigned long current_stack_pointer(void)
-@@ -199,8 +202,7 @@ static inline unsigned long current_stack_pointer(void)
+@@ -195,14 +198,9 @@ static inline unsigned long current_stack_pointer(void)
+
+ #else /* !__ASSEMBLY__ */
+-#ifdef CONFIG_X86_64
+-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+-#endif
+-
/* Load thread_info address into "reg" */
#define GET_THREAD_INFO(reg) \
-- _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+- _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
- _ASM_SUB $(THREAD_SIZE),reg ;
+ _ASM_MOV PER_CPU_VAR(current_tinfo),reg ;
/*
* ASM operand which evaluates to a 'thread_info' address of
-@@ -293,5 +295,12 @@ static inline bool is_ia32_task(void)
+@@ -295,5 +293,12 @@ static inline bool is_ia32_task(void)
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
@@ -19488,7 +21975,7 @@ index cd79194..6a9956f 100644
}
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index ace9dec..3f9e253 100644
+index a8df874..ef0e34f 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -7,6 +7,7 @@
@@ -19512,7 +21999,7 @@ index ace9dec..3f9e253 100644
#define segment_eq(a, b) ((a).seg == (b).seg)
-@@ -85,8 +91,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+@@ -86,8 +92,36 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
@@ -19551,7 +22038,7 @@ index ace9dec..3f9e253 100644
/*
* The exception table consists of pairs of addresses relative to the
-@@ -134,11 +168,13 @@ extern int __get_user_8(void);
+@@ -135,11 +169,13 @@ extern int __get_user_8(void);
extern int __get_user_bad(void);
/*
@@ -19568,7 +22055,7 @@ index ace9dec..3f9e253 100644
/**
* get_user: - Get a simple variable from user space.
-@@ -176,10 +212,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -178,10 +214,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -19581,7 +22068,7 @@ index ace9dec..3f9e253 100644
__ret_gu; \
})
-@@ -187,13 +225,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -189,13 +227,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
: "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
@@ -19606,7 +22093,7 @@ index ace9dec..3f9e253 100644
"3: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
-@@ -206,8 +252,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+@@ -208,8 +254,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#define __put_user_asm_ex_u64(x, addr) \
asm volatile(ASM_STAC "\n" \
@@ -19617,7 +22104,7 @@ index ace9dec..3f9e253 100644
"3: " ASM_CLAC "\n" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
-@@ -257,7 +303,8 @@ extern void __put_user_8(void);
+@@ -260,7 +306,8 @@ extern void __put_user_8(void);
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
might_fault(); \
@@ -19627,7 +22114,7 @@ index ace9dec..3f9e253 100644
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_x(1, __pu_val, ptr, __ret_pu); \
-@@ -275,6 +322,7 @@ extern void __put_user_8(void);
+@@ -278,6 +325,7 @@ extern void __put_user_8(void);
__put_user_x(X, __pu_val, ptr, __ret_pu); \
break; \
} \
@@ -19635,7 +22122,7 @@ index ace9dec..3f9e253 100644
__ret_pu; \
})
-@@ -355,8 +403,10 @@ do { \
+@@ -358,8 +406,10 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -19647,7 +22134,7 @@ index ace9dec..3f9e253 100644
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -364,8 +414,10 @@ do { \
+@@ -367,8 +417,10 @@ do { \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
@@ -19660,7 +22147,7 @@ index ace9dec..3f9e253 100644
#define __get_user_size_ex(x, ptr, size) \
do { \
-@@ -389,7 +441,7 @@ do { \
+@@ -392,7 +444,7 @@ do { \
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
@@ -19669,7 +22156,7 @@ index ace9dec..3f9e253 100644
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: ltype(x) : "m" (__m(addr)))
-@@ -406,13 +458,24 @@ do { \
+@@ -409,13 +461,24 @@ do { \
int __gu_err; \
unsigned long __gu_val; \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
@@ -19696,7 +22183,7 @@ index ace9dec..3f9e253 100644
/*
* Tell gcc we read from memory instead of writing: this is because
-@@ -420,8 +483,10 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -423,8 +486,10 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
@@ -19708,7 +22195,7 @@ index ace9dec..3f9e253 100644
"2: " ASM_CLAC "\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
-@@ -429,10 +494,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -432,10 +497,12 @@ struct __large_struct { unsigned long buf[100]; };
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
@@ -19723,7 +22210,7 @@ index ace9dec..3f9e253 100644
"2:\n" \
_ASM_EXTABLE_EX(1b, 2b) \
: : ltype(x), "m" (__m(addr)))
-@@ -442,11 +509,13 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -445,11 +512,13 @@ struct __large_struct { unsigned long buf[100]; };
*/
#define uaccess_try do { \
current_thread_info()->uaccess_err = 0; \
@@ -19737,7 +22224,7 @@ index ace9dec..3f9e253 100644
(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
} while (0)
-@@ -471,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -475,8 +544,12 @@ struct __large_struct { unsigned long buf[100]; };
* On error, the variable @x is set to zero.
*/
@@ -19750,7 +22237,7 @@ index ace9dec..3f9e253 100644
/**
* __put_user: - Write a simple value into user space, with less checking.
-@@ -494,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -499,8 +572,12 @@ struct __large_struct { unsigned long buf[100]; };
* Returns zero on success, or -EFAULT on error.
*/
@@ -19763,7 +22250,7 @@ index ace9dec..3f9e253 100644
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
-@@ -513,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
+@@ -518,7 +595,7 @@ struct __large_struct { unsigned long buf[100]; };
#define get_user_ex(x, ptr) do { \
unsigned long __gue_val; \
__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
@@ -19772,7 +22259,7 @@ index ace9dec..3f9e253 100644
} while (0)
#define put_user_try uaccess_try
-@@ -531,7 +608,7 @@ extern __must_check long strlen_user(const char __user *str);
+@@ -536,7 +613,7 @@ extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
@@ -19781,7 +22268,7 @@ index ace9dec..3f9e253 100644
extern void __cmpxchg_wrong_size(void)
__compiletime_error("Bad argument size for cmpxchg");
-@@ -542,18 +619,19 @@ extern void __cmpxchg_wrong_size(void)
+@@ -547,18 +624,19 @@ extern void __cmpxchg_wrong_size(void)
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
@@ -19803,7 +22290,7 @@ index ace9dec..3f9e253 100644
: "i" (-EFAULT), "q" (__new), "1" (__old) \
: "memory" \
); \
-@@ -562,14 +640,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -567,14 +645,14 @@ extern void __cmpxchg_wrong_size(void)
case 2: \
{ \
asm volatile("\t" ASM_STAC "\n" \
@@ -19820,7 +22307,7 @@ index ace9dec..3f9e253 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -578,14 +656,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -583,14 +661,14 @@ extern void __cmpxchg_wrong_size(void)
case 4: \
{ \
asm volatile("\t" ASM_STAC "\n" \
@@ -19837,7 +22324,7 @@ index ace9dec..3f9e253 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -597,14 +675,14 @@ extern void __cmpxchg_wrong_size(void)
+@@ -602,14 +680,14 @@ extern void __cmpxchg_wrong_size(void)
__cmpxchg_wrong_size(); \
\
asm volatile("\t" ASM_STAC "\n" \
@@ -19854,7 +22341,7 @@ index ace9dec..3f9e253 100644
: "i" (-EFAULT), "r" (__new), "1" (__old) \
: "memory" \
); \
-@@ -613,6 +691,7 @@ extern void __cmpxchg_wrong_size(void)
+@@ -618,6 +696,7 @@ extern void __cmpxchg_wrong_size(void)
default: \
__cmpxchg_wrong_size(); \
} \
@@ -19862,7 +22349,7 @@ index ace9dec..3f9e253 100644
*__uval = __old; \
__ret; \
})
-@@ -636,17 +715,6 @@ extern struct movsl_mask {
+@@ -641,17 +720,6 @@ extern struct movsl_mask {
#define ARCH_HAS_NOCACHE_UACCESS 1
@@ -19880,7 +22367,7 @@ index ace9dec..3f9e253 100644
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
-@@ -656,7 +724,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
+@@ -661,7 +729,7 @@ unsigned long __must_check _copy_to_user(void __user *to, const void *from,
extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
@@ -19889,7 +22376,7 @@ index ace9dec..3f9e253 100644
#undef copy_user_diag
-@@ -669,7 +737,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
+@@ -674,7 +742,7 @@ __copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
@@ -19898,7 +22385,7 @@ index ace9dec..3f9e253 100644
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
#else
-@@ -684,10 +752,16 @@ __copy_from_user_overflow(int size, unsigned long count)
+@@ -689,10 +757,16 @@ __copy_from_user_overflow(int size, unsigned long count)
#endif
@@ -19916,7 +22403,7 @@ index ace9dec..3f9e253 100644
might_fault();
-@@ -709,12 +783,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -714,12 +788,15 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
* case, and do only runtime checking for non-constant sizes.
*/
@@ -19938,7 +22425,7 @@ index ace9dec..3f9e253 100644
return n;
}
-@@ -722,17 +799,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+@@ -727,17 +804,18 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
@@ -19965,7 +22452,7 @@ index ace9dec..3f9e253 100644
return n;
}
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
-index 3c03a5d..edb68ae 100644
+index f5dcb52..da2c15b 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -40,9 +40,14 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
@@ -19984,7 +22471,7 @@ index 3c03a5d..edb68ae 100644
if (__builtin_constant_p(n)) {
unsigned long ret;
-@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
+@@ -87,12 +92,16 @@ static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
@@ -20002,7 +22489,7 @@ index 3c03a5d..edb68ae 100644
/* Avoid zeroing the tail if the copy fails..
* If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
* but as the zeroing behaviour is only significant when n is not
-@@ -137,6 +146,12 @@ static __always_inline unsigned long
+@@ -143,6 +152,12 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
@@ -20015,7 +22502,7 @@ index 3c03a5d..edb68ae 100644
if (__builtin_constant_p(n)) {
unsigned long ret;
-@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+@@ -165,6 +180,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
might_fault();
@@ -20026,7 +22513,7 @@ index 3c03a5d..edb68ae 100644
if (__builtin_constant_p(n)) {
unsigned long ret;
-@@ -181,7 +200,10 @@ static __always_inline unsigned long
+@@ -187,7 +206,10 @@ static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
unsigned long n)
{
@@ -20379,7 +22866,7 @@ index 5b238981..77fdd78 100644
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
-index f58a9c7..dc378042a 100644
+index 48d34d2..90671c7 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -129,7 +129,7 @@ struct x86_init_ops {
@@ -20408,20 +22895,17 @@ index f58a9c7..dc378042a 100644
+} __no_const;
struct pci_dev;
- struct msi_msg;
-@@ -182,7 +182,7 @@ struct x86_msi_ops {
+
+@@ -177,12 +177,12 @@ struct x86_msi_ops {
+ void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
void (*restore_msi_irqs)(struct pci_dev *dev);
- int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
-};
+} __no_const;
- struct IO_APIC_route_entry;
- struct io_apic_irq_attr;
-@@ -203,7 +203,7 @@ struct x86_io_apic_ops {
- unsigned int destination, int vector,
- struct io_apic_irq_attr *attr);
- void (*eoi_ioapic_pin)(int apic, int pin, int vector);
+ struct x86_io_apic_ops {
+ unsigned int (*read) (unsigned int apic, unsigned int reg);
+ void (*disable)(void);
-};
+} __no_const;
@@ -20440,56 +22924,11 @@ index c44a5d5..7f83cfc 100644
{
unsigned long mfn;
-diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
-index c9a6d68..cb57f42 100644
---- a/arch/x86/include/asm/xsave.h
-+++ b/arch/x86/include/asm/xsave.h
-@@ -223,12 +223,16 @@ static inline int xsave_user(struct xsave_struct __user *buf)
- if (unlikely(err))
- return -EFAULT;
-
-+ pax_open_userland();
- __asm__ __volatile__(ASM_STAC "\n"
-- "1:"XSAVE"\n"
-+ "1:"
-+ __copyuser_seg
-+ XSAVE"\n"
- "2: " ASM_CLAC "\n"
- xstate_fault
- : "D" (buf), "a" (-1), "d" (-1), "0" (0)
- : "memory");
-+ pax_close_userland();
- return err;
- }
-
-@@ -238,16 +242,20 @@ static inline int xsave_user(struct xsave_struct __user *buf)
- static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
- {
- int err = 0;
-- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
-+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
- u32 lmask = mask;
- u32 hmask = mask >> 32;
-
-+ pax_open_userland();
- __asm__ __volatile__(ASM_STAC "\n"
-- "1:"XRSTOR"\n"
-+ "1:"
-+ __copyuser_seg
-+ XRSTOR"\n"
- "2: " ASM_CLAC "\n"
- xstate_fault
- : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
- : "memory"); /* memory required? */
-+ pax_close_userland();
- return err;
- }
-
diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
-index 960a8a9..404daf7 100644
+index 0f457e6..5970c0a 100644
--- a/arch/x86/include/uapi/asm/e820.h
+++ b/arch/x86/include/uapi/asm/e820.h
-@@ -68,7 +68,7 @@ struct e820map {
+@@ -69,7 +69,7 @@ struct e820map {
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
@@ -20499,7 +22938,7 @@ index 960a8a9..404daf7 100644
#define BIOS_ROM_BASE 0xffe00000
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
-index 9bcd0b5..750f1b7 100644
+index 0f15af4..501a76a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -28,7 +28,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
@@ -20510,12 +22949,12 @@ index 9bcd0b5..750f1b7 100644
+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o
- obj-y += syscall_$(BITS).o vsyscall_gtod.o
+ obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
-index dbe76a1..e2ec334 100644
+index 9393896..adbaa90 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
-@@ -1361,7 +1361,7 @@ static void __init acpi_reduced_hw_init(void)
+@@ -1333,7 +1333,7 @@ static void __init acpi_reduced_hw_init(void)
* If your system is blacklisted here, but you find that acpi=force
* works for you, please contact linux-acpi@vger.kernel.org
*/
@@ -20524,7 +22963,7 @@ index dbe76a1..e2ec334 100644
/*
* Boxes that need ACPI disabled
*/
-@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
+@@ -1408,7 +1408,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
};
/* second table for DMI checks that should run after early-quirks */
@@ -20551,10 +22990,10 @@ index d1daead..acd77e2 100644
#endif
initial_code = (unsigned long)wakeup_long64;
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
-index 665c6b7..eae4d56 100644
+index 0c26b1b..a766e85 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
-@@ -29,13 +29,11 @@ wakeup_pmode_return:
+@@ -31,13 +31,11 @@ wakeup_pmode_return:
# and restore the stack ... but you need gdt for this to work
movl saved_context_esp, %esp
@@ -20571,10 +23010,18 @@ index 665c6b7..eae4d56 100644
bogus_magic:
jmp bogus_magic
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
-index aef6531..d7ca83a 100644
+index c42827e..c2fd50b 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
-@@ -248,7 +248,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
+@@ -20,6 +20,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/io.h>
+ #include <asm/fixmap.h>
++#include <asm/boot.h>
+
+ int __read_mostly alternatives_patched;
+
+@@ -261,7 +262,9 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
unsigned int noplen = len;
if (noplen > ASM_NOP_MAX)
noplen = ASM_NOP_MAX;
@@ -20584,13 +23031,13 @@ index aef6531..d7ca83a 100644
insns += noplen;
len -= noplen;
}
-@@ -276,6 +278,13 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
+@@ -289,6 +292,13 @@ recompute_jump(struct alt_instr *a, u8 *orig_insn, u8 *repl_insn, u8 *insnbuf)
if (a->replacementlen != 5)
return;
+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
+ if (orig_insn < (u8 *)_text || (u8 *)_einittext <= orig_insn)
-+ orig_insn = ktva_ktla(orig_insn);
++ orig_insn = (u8 *)ktva_ktla((unsigned long)orig_insn);
+ else
+ orig_insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
+#endif
@@ -20598,7 +23045,7 @@ index aef6531..d7ca83a 100644
o_dspl = *(s32 *)(insnbuf + 1);
/* next_rip of the replacement JMP */
-@@ -346,6 +355,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+@@ -359,6 +369,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
{
struct alt_instr *a;
u8 *instr, *replacement;
@@ -20606,7 +23053,7 @@ index aef6531..d7ca83a 100644
u8 insnbuf[MAX_PATCH_LEN];
DPRINTK("alt table %p -> %p", start, end);
-@@ -361,46 +371,71 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
+@@ -374,46 +385,71 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
for (a = start; a < end; a++) {
int insnbuf_sz = 0;
@@ -20618,11 +23065,11 @@ index aef6531..d7ca83a 100644
+ if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= instr &&
+ instr < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ vinstr = ktla_ktva(instr);
++ vinstr = (u8 *)ktla_ktva((unsigned long)instr);
+ } else if ((u8 *)_text <= instr && instr < (u8 *)_einittext) {
-+ vinstr = ktla_ktva(instr);
++ vinstr = (u8 *)ktla_ktva((unsigned long)instr);
+ } else {
-+ instr = ktva_ktla(instr);
++ instr = (u8 *)ktva_ktla((unsigned long)instr);
+ }
+#endif
+
@@ -20632,11 +23079,11 @@ index aef6531..d7ca83a 100644
+ if ((u8 *)_text - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR) <= replacement &&
+ replacement < (u8 *)_einittext - (____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR)) {
+ replacement += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
-+ vreplacement = ktla_ktva(replacement);
++ vreplacement = (u8 *)ktla_ktva((unsigned long)replacement);
+ } else if ((u8 *)_text <= replacement && replacement < (u8 *)_einittext) {
-+ vreplacement = ktla_ktva(replacement);
++ vreplacement = (u8 *)ktla_ktva((unsigned long)replacement);
+ } else
-+ replacement = ktva_ktla(replacement);
++ replacement = (u8 *)ktva_ktla((unsigned long)replacement);
+#endif
+
BUG_ON(a->instrlen > sizeof(insnbuf));
@@ -20692,7 +23139,7 @@ index aef6531..d7ca83a 100644
text_poke_early(instr, insnbuf, insnbuf_sz);
}
-@@ -416,10 +451,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
+@@ -429,10 +465,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
@@ -20706,11 +23153,11 @@ index aef6531..d7ca83a 100644
continue;
/* turn DS segment override prefix into lock prefix */
- if (*ptr == 0x3e)
-+ if (*ktla_ktva(ptr) == 0x3e)
++ if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0x3e)
text_poke(ptr, ((unsigned char []){0xf0}), 1);
}
mutex_unlock(&text_mutex);
-@@ -434,10 +475,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
+@@ -447,10 +489,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
for (poff = start; poff < end; poff++) {
u8 *ptr = (u8 *)poff + *poff;
@@ -20724,20 +23171,20 @@ index aef6531..d7ca83a 100644
continue;
/* turn lock prefix into DS segment override prefix */
- if (*ptr == 0xf0)
-+ if (*ktla_ktva(ptr) == 0xf0)
++ if (*(u8 *)ktla_ktva((unsigned long)ptr) == 0xf0)
text_poke(ptr, ((unsigned char []){0x3E}), 1);
}
mutex_unlock(&text_mutex);
-@@ -574,7 +621,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+@@ -587,7 +635,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
- memcpy(insnbuf, p->instr, p->len);
-+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
++ memcpy(insnbuf, (const void *)ktla_ktva((unsigned long)p->instr), p->len);
used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
(unsigned long)p->instr, p->len);
-@@ -621,7 +668,7 @@ void __init alternative_instructions(void)
+@@ -634,7 +682,7 @@ void __init alternative_instructions(void)
if (!uniproc_patched || num_possible_cpus() == 1)
free_init_pages("SMP alternatives",
(unsigned long)__smp_locks,
@@ -20746,7 +23193,7 @@ index aef6531..d7ca83a 100644
#endif
apply_paravirt(__parainstructions, __parainstructions_end);
-@@ -641,13 +688,17 @@ void __init alternative_instructions(void)
+@@ -655,13 +703,17 @@ void __init alternative_instructions(void)
* instructions. And on the local CPU you need to be protected again NMI or MCE
* handlers seeing an inconsistent instruction while you patch.
*/
@@ -20759,20 +23206,20 @@ index aef6531..d7ca83a 100644
- memcpy(addr, opcode, len);
+
+ pax_open_kernel();
-+ memcpy(ktla_ktva(addr), opcode, len);
++ memcpy((void *)ktla_ktva((unsigned long)addr), opcode, len);
sync_core();
+ pax_close_kernel();
+
local_irq_restore(flags);
/* Could also do a CLFLUSH here to speed up CPU recovery; but
that causes hangs on some VIA CPUs. */
-@@ -669,36 +720,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
+@@ -683,36 +735,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
*/
void *text_poke(void *addr, const void *opcode, size_t len)
{
- unsigned long flags;
- char *vaddr;
-+ unsigned char *vaddr = ktla_ktva(addr);
++ unsigned char *vaddr = (void *)ktla_ktva((unsigned long)addr);
struct page *pages[2];
- int i;
+ size_t i;
@@ -20811,7 +23258,7 @@ index aef6531..d7ca83a 100644
return addr;
}
-@@ -752,7 +789,7 @@ int poke_int3_handler(struct pt_regs *regs)
+@@ -766,7 +804,7 @@ int poke_int3_handler(struct pt_regs *regs)
*/
void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
@@ -20891,29 +23338,29 @@ index c4a8d63..fe893ac 100644
.name = "bigsmp",
.probe = probe_bigsmp,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
-index f4dc246..fbab133 100644
+index 206052e..621dfb4 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1862,7 +1862,7 @@ int native_ioapic_set_affinity(struct irq_data *data,
- return ret;
+@@ -1682,7 +1682,7 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
+ return was_pending;
}
-atomic_t irq_mis_count;
+atomic_unchecked_t irq_mis_count;
#ifdef CONFIG_GENERIC_PENDING_IRQ
- static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
-@@ -2003,7 +2003,7 @@ static void ack_ioapic_level(struct irq_data *data)
+ static bool io_apic_level_ack_pending(struct mp_chip_data *data)
+@@ -1821,7 +1821,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
* at the cpu.
*/
if (!(v & (1 << (i & 0x1f)))) {
- atomic_inc(&irq_mis_count);
+ atomic_inc_unchecked(&irq_mis_count);
-
- eoi_ioapic_irq(irq, cfg);
+ eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
}
-@@ -2011,7 +2011,7 @@ static void ack_ioapic_level(struct irq_data *data)
- ioapic_irqd_unmask(data, cfg, masked);
+
+@@ -1867,7 +1867,7 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
+ return ret;
}
-static struct irq_chip ioapic_chip __read_mostly = {
@@ -20921,7 +23368,7 @@ index f4dc246..fbab133 100644
.name = "IO-APIC",
.irq_startup = startup_ioapic_irq,
.irq_mask = mask_ioapic_irq,
-@@ -2070,7 +2070,7 @@ static void ack_lapic_irq(struct irq_data *data)
+@@ -1936,7 +1936,7 @@ static void ack_lapic_irq(struct irq_data *data)
ack_APIC_irq();
}
@@ -20930,6 +23377,19 @@ index f4dc246..fbab133 100644
.name = "local-APIC",
.irq_mask = mask_lapic_irq,
.irq_unmask = unmask_lapic_irq,
+diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
+index 1a9d735..c58b5c5 100644
+--- a/arch/x86/kernel/apic/msi.c
++++ b/arch/x86/kernel/apic/msi.c
+@@ -267,7 +267,7 @@ static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+ hpet_msi_write(data->handler_data, msg);
+ }
+
+-static struct irq_chip hpet_msi_controller = {
++static irq_chip_no_const hpet_msi_controller __read_only = {
+ .name = "HPET-MSI",
+ .irq_unmask = hpet_msi_unmask,
+ .irq_mask = hpet_msi_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index bda4886..f9c7195 100644
--- a/arch/x86/kernel/apic/probe_32.c
@@ -20944,19 +23404,19 @@ index bda4886..f9c7195 100644
.name = "default",
.probe = probe_default,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
-index 6cedd79..023ff8e 100644
+index 2683f36..0bdc74c 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
-@@ -21,7 +21,7 @@
-
- static DEFINE_RAW_SPINLOCK(vector_lock);
+@@ -36,7 +36,7 @@ static struct irq_chip lapic_controller;
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+ #endif
-void lock_vector_lock(void)
+void lock_vector_lock(void) __acquires(vector_lock)
{
/* Used to the online set of cpus does not change
* during assign_irq_vector.
-@@ -29,7 +29,7 @@ void lock_vector_lock(void)
+@@ -44,7 +44,7 @@ void lock_vector_lock(void)
raw_spin_lock(&vector_lock);
}
@@ -20988,10 +23448,10 @@ index ab3219b..e8033eb 100644
.name = "cluster x2apic",
.probe = x2apic_cluster_probe,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
-index 6fae733..5ca17af 100644
+index 3ffd925..8c0f5a8 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
-@@ -88,7 +88,7 @@ static int x2apic_phys_probe(void)
+@@ -90,7 +90,7 @@ static int x2apic_phys_probe(void)
return apic == &apic_x2apic_phys;
}
@@ -21098,7 +23558,7 @@ index 927ec92..de68f32 100644
proc_create("apm", 0, NULL, &apm_file_ops);
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
-index 9f6b934..cf5ffb3 100644
+index 8e3d22a1..37118b6 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,6 +32,8 @@ void common(void) {
@@ -21110,8 +23570,8 @@ index 9f6b934..cf5ffb3 100644
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -52,8 +54,26 @@ void common(void) {
- OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+@@ -73,8 +75,26 @@ void common(void) {
+ #endif
OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
+
@@ -21138,10 +23598,10 @@ index 9f6b934..cf5ffb3 100644
BLANK();
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
-index 5ce6f2d..9e738f3 100644
+index d8f42f9..a46f1fc 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
-@@ -80,6 +80,7 @@ int main(void)
+@@ -59,6 +59,7 @@ int main(void)
BLANK();
#undef ENTRY
@@ -21165,10 +23625,10 @@ index 9bff687..5b899fb 100644
obj-y += common.o
obj-y += rdrand.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
-index e4cf633..941f450 100644
+index dd3a4ba..06672af 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
-@@ -729,7 +729,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+@@ -750,7 +750,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
@@ -21177,8 +23637,28 @@ index e4cf633..941f450 100644
/* Duron Rev A0 */
if (c->x86_model == 3 && c->x86_mask == 0)
size = 64;
+diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
+index 04f0fe5..3c0598c 100644
+--- a/arch/x86/kernel/cpu/bugs_64.c
++++ b/arch/x86/kernel/cpu/bugs_64.c
+@@ -10,6 +10,7 @@
+ #include <asm/processor.h>
+ #include <asm/mtrr.h>
+ #include <asm/cacheflush.h>
++#include <asm/sections.h>
+
+ void __init check_bugs(void)
+ {
+@@ -18,6 +19,7 @@ void __init check_bugs(void)
+ printk(KERN_INFO "CPU: ");
+ print_cpu_info(&boot_cpu_data);
+ #endif
++ set_memory_nx((unsigned long)_sinitdata, (__START_KERNEL_map + KERNEL_IMAGE_SIZE - (unsigned long)_sinitdata) >> PAGE_SHIFT);
+ alternative_instructions();
+
+ /*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
-index a62cf04..a55415c 100644
+index cb9e5df..0d25636 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -91,60 +91,6 @@ static const struct cpu_dev default_cpu = {
@@ -21239,10 +23719,10 @@ index a62cf04..a55415c 100644
-} };
-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
-
- static int __init x86_xsave_setup(char *s)
+ static int __init x86_mpx_setup(char *s)
{
- if (strlen(s))
-@@ -306,6 +252,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ /* require an exact match without trailing characters */
+@@ -287,6 +233,109 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
}
}
@@ -21352,7 +23832,7 @@ index a62cf04..a55415c 100644
/*
* Some CPU features depend on higher CPUID levels, which may not always
* be available due to CPUID level capping or broken virtualization
-@@ -406,7 +455,7 @@ void switch_to_new_gdt(int cpu)
+@@ -387,7 +436,7 @@ void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
@@ -21361,7 +23841,7 @@ index a62cf04..a55415c 100644
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
/* Reload the per-cpu base */
-@@ -935,6 +984,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -918,6 +967,20 @@ static void identify_cpu(struct cpuinfo_x86 *c)
setup_smep(c);
setup_smap(c);
@@ -21382,7 +23862,7 @@ index a62cf04..a55415c 100644
/*
* The vendor-specific functions might have changed features.
* Now we do "generic changes."
-@@ -1009,7 +1072,7 @@ void enable_sep_cpu(void)
+@@ -992,7 +1055,7 @@ void enable_sep_cpu(void)
int cpu;
cpu = get_cpu();
@@ -21391,18 +23871,13 @@ index a62cf04..a55415c 100644
if (!boot_cpu_has(X86_FEATURE_SEP))
goto out;
-@@ -1155,14 +1218,16 @@ static __init int setup_disablecpuid(char *arg)
+@@ -1138,10 +1201,12 @@ static __init int setup_disablecpuid(char *arg)
}
__setup("clearcpuid=", setup_disablecpuid);
+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
+EXPORT_PER_CPU_SYMBOL(current_tinfo);
+
- DEFINE_PER_CPU(unsigned long, kernel_stack) =
-- (unsigned long)&init_thread_union + THREAD_SIZE;
-+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
- EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
#ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -21412,7 +23887,36 @@ index a62cf04..a55415c 100644
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
-@@ -1367,7 +1432,7 @@ void cpu_init(void)
+@@ -1253,21 +1318,21 @@ EXPORT_PER_CPU_SYMBOL(current_task);
+ DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
+ EXPORT_PER_CPU_SYMBOL(__preempt_count);
+
++#ifdef CONFIG_CC_STACKPROTECTOR
++DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
++#endif
++
++#endif /* CONFIG_X86_64 */
++
+ /*
+ * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
+ * the top of the kernel stack. Use an extra percpu variable to track the
+ * top of the kernel stack directly.
+ */
+ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
+- (unsigned long)&init_thread_union + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
+
+-#ifdef CONFIG_CC_STACKPROTECTOR
+-DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+-#endif
+-
+-#endif /* CONFIG_X86_64 */
+-
+ /*
+ * Clear all 6 debug registers:
+ */
+@@ -1343,7 +1408,7 @@ void cpu_init(void)
*/
load_ucode_ap();
@@ -21421,7 +23925,7 @@ index a62cf04..a55415c 100644
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
-@@ -1399,7 +1464,6 @@ void cpu_init(void)
+@@ -1375,7 +1440,6 @@ void cpu_init(void)
wrmsrl(MSR_KERNEL_GS_BASE, 0);
barrier();
@@ -21429,7 +23933,7 @@ index a62cf04..a55415c 100644
x2apic_setup();
/*
-@@ -1451,7 +1515,7 @@ void cpu_init(void)
+@@ -1427,7 +1491,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
@@ -21439,7 +23943,7 @@ index a62cf04..a55415c 100644
wait_for_master_cpu(cpu);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
-index edcb0e2..a138233 100644
+index be4febc..f7af533 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -519,25 +519,23 @@ cache_private_attrs_is_visible(struct kobject *kobj,
@@ -21491,7 +23995,7 @@ index edcb0e2..a138233 100644
return &cache_private_group;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 20190bd..cadb2ab 100644
+index df919ff..3332bf7 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -47,6 +47,7 @@
@@ -21502,7 +24006,7 @@ index 20190bd..cadb2ab 100644
#include "mce-internal.h"
-@@ -256,7 +257,7 @@ static void print_mce(struct mce *m)
+@@ -259,7 +260,7 @@ static void print_mce(struct mce *m)
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
m->cs, m->ip);
@@ -21511,7 +24015,7 @@ index 20190bd..cadb2ab 100644
print_symbol("{%s}", m->ip);
pr_cont("\n");
}
-@@ -289,10 +290,10 @@ static void print_mce(struct mce *m)
+@@ -292,10 +293,10 @@ static void print_mce(struct mce *m)
#define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -21524,7 +24028,7 @@ index 20190bd..cadb2ab 100644
/* Panic in progress. Enable interrupts and wait for final IPI */
static void wait_for_panic(void)
-@@ -316,7 +317,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -319,7 +320,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
/*
* Make sure only one CPU runs in machine check panic
*/
@@ -21533,7 +24037,7 @@ index 20190bd..cadb2ab 100644
wait_for_panic();
barrier();
-@@ -324,7 +325,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -327,7 +328,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
console_verbose();
} else {
/* Don't log too much for fake panic */
@@ -21542,7 +24046,7 @@ index 20190bd..cadb2ab 100644
return;
}
/* First print corrected ones that are still unlogged */
-@@ -363,7 +364,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
+@@ -366,7 +367,7 @@ static void mce_panic(const char *msg, struct mce *final, char *exp)
if (!fake_panic) {
if (panic_timeout == 0)
panic_timeout = mca_cfg.panic_timeout;
@@ -21551,7 +24055,7 @@ index 20190bd..cadb2ab 100644
} else
pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
}
-@@ -749,7 +750,7 @@ static int mce_timed_out(u64 *t, const char *msg)
+@@ -752,7 +753,7 @@ static int mce_timed_out(u64 *t, const char *msg)
* might have been modified by someone else.
*/
rmb();
@@ -21560,7 +24064,7 @@ index 20190bd..cadb2ab 100644
wait_for_panic();
if (!mca_cfg.monarch_timeout)
goto out;
-@@ -1679,7 +1680,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+@@ -1708,7 +1709,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
}
/* Call the installed machine check handler for this CPU setup. */
@@ -21569,7 +24073,7 @@ index 20190bd..cadb2ab 100644
unexpected_machine_check;
/*
-@@ -1702,7 +1703,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
+@@ -1731,7 +1732,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
return;
}
@@ -21579,7 +24083,7 @@ index 20190bd..cadb2ab 100644
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
-@@ -1716,7 +1719,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
+@@ -1745,7 +1748,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
*/
static DEFINE_SPINLOCK(mce_chrdev_state_lock);
@@ -21588,7 +24092,7 @@ index 20190bd..cadb2ab 100644
static int mce_chrdev_open_exclu; /* already open exclusive? */
static int mce_chrdev_open(struct inode *inode, struct file *file)
-@@ -1724,7 +1727,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1753,7 +1756,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
spin_lock(&mce_chrdev_state_lock);
if (mce_chrdev_open_exclu ||
@@ -21597,7 +24101,7 @@ index 20190bd..cadb2ab 100644
spin_unlock(&mce_chrdev_state_lock);
return -EBUSY;
-@@ -1732,7 +1735,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
+@@ -1761,7 +1764,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
if (file->f_flags & O_EXCL)
mce_chrdev_open_exclu = 1;
@@ -21606,7 +24110,7 @@ index 20190bd..cadb2ab 100644
spin_unlock(&mce_chrdev_state_lock);
-@@ -1743,7 +1746,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
+@@ -1772,7 +1775,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
{
spin_lock(&mce_chrdev_state_lock);
@@ -21615,7 +24119,7 @@ index 20190bd..cadb2ab 100644
mce_chrdev_open_exclu = 0;
spin_unlock(&mce_chrdev_state_lock);
-@@ -2419,7 +2422,7 @@ static __init void mce_init_banks(void)
+@@ -2448,7 +2451,7 @@ static __init void mce_init_banks(void)
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
@@ -21624,7 +24128,7 @@ index 20190bd..cadb2ab 100644
sysfs_attr_init(&a->attr);
a->attr.name = b->attrname;
-@@ -2526,7 +2529,7 @@ struct dentry *mce_get_debugfs_dir(void)
+@@ -2555,7 +2558,7 @@ struct dentry *mce_get_debugfs_dir(void)
static void mce_reset(void)
{
cpu_missing = 0;
@@ -21678,10 +24182,10 @@ index 44f1382..315b292 100644
wmb();
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
-index 36a8361..e7058c2 100644
+index 6236a54..532026d 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
-@@ -518,7 +518,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+@@ -460,7 +460,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
@@ -21691,10 +24195,10 @@ index 36a8361..e7058c2 100644
};
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
-index a41bead..4e3685b 100644
+index 969dc17..a9c3fdd 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
-@@ -298,13 +298,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+@@ -237,13 +237,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
static int get_ucode_user(void *to, const void *from, size_t n)
{
@@ -21711,10 +24215,10 @@ index a41bead..4e3685b 100644
static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
-index ea5f363..cb0e905 100644
+index e7ed0d8..57a2ab9 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
-@@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
+@@ -72,7 +72,7 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
@@ -21724,7 +24228,7 @@ index ea5f363..cb0e905 100644
const struct mtrr_ops *mtrr_if;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
-index df5e41f..816c719 100644
+index 951884d..4796b75 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -25,7 +25,7 @@ struct mtrr_ops {
@@ -21737,10 +24241,10 @@ index df5e41f..816c719 100644
extern int generic_get_free_region(unsigned long base, unsigned long size,
int replace_reg);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index aa4e3a7..469370f 100644
+index 9469dfa..2b026bc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -1509,7 +1509,7 @@ static void __init pmu_check_apic(void)
+@@ -1518,7 +1518,7 @@ static void __init pmu_check_apic(void)
}
@@ -21749,7 +24253,7 @@ index aa4e3a7..469370f 100644
.name = "format",
.attrs = NULL,
};
-@@ -1608,7 +1608,7 @@ static struct attribute *events_attr[] = {
+@@ -1617,7 +1617,7 @@ static struct attribute *events_attr[] = {
NULL,
};
@@ -21758,16 +24262,25 @@ index aa4e3a7..469370f 100644
.name = "events",
.attrs = events_attr,
};
-@@ -2181,7 +2181,7 @@ static unsigned long get_segment_base(unsigned int segment)
+@@ -2176,7 +2176,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
+ static unsigned long get_segment_base(unsigned int segment)
+ {
+ struct desc_struct *desc;
+- int idx = segment >> 3;
++ unsigned int idx = segment >> 3;
+
+ if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ struct ldt_struct *ldt;
+@@ -2194,7 +2194,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
-- desc = raw_cpu_ptr(gdt_page.gdt);
-+ desc = get_cpu_gdt_table(smp_processor_id());
+- desc = raw_cpu_ptr(gdt_page.gdt) + idx;
++ desc = get_cpu_gdt_table(smp_processor_id()) + idx;
}
- return get_desc_base(desc + idx);
-@@ -2271,7 +2271,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+ return get_desc_base(desc);
+@@ -2284,7 +2284,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
break;
perf_callchain_store(entry, frame.return_address);
@@ -21790,10 +24303,10 @@ index 97242a9..cf9c30e 100644
while (amd_iommu_v2_event_descs[i].attr.attr.name)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index 2813ea0..3ef5969c8 100644
+index 6326ae2..f092747 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -3033,10 +3033,10 @@ __init int intel_pmu_init(void)
+@@ -3016,10 +3016,10 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
if (boot_cpu_has(X86_FEATURE_PDCM)) {
@@ -21808,7 +24321,7 @@ index 2813ea0..3ef5969c8 100644
intel_ds_init();
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
-index 7795f3f..3535b76 100644
+index 43dd672..78c0562 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -252,7 +252,7 @@ static void bts_event_start(struct perf_event *event, int flags)
@@ -21839,10 +24352,10 @@ index 7795f3f..3535b76 100644
__bts_event_stop(event);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-index cb77b11..8867302 100644
+index 377e8f8..2982f48 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
-@@ -1360,7 +1360,9 @@ static int __init intel_cqm_init(void)
+@@ -1364,7 +1364,9 @@ static int __init intel_cqm_init(void)
goto out;
}
@@ -21854,7 +24367,7 @@ index cb77b11..8867302 100644
ret = intel_cqm_setup_rmid_cache();
if (ret)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
-index 123ff1b..d53e500 100644
+index 183de71..bd34d52 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -116,16 +116,12 @@ static const struct attribute_group *pt_attr_groups[] = {
@@ -21925,7 +24438,7 @@ index 123ff1b..d53e500 100644
}
#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
-@@ -928,7 +910,7 @@ static void pt_event_start(struct perf_event *event, int mode)
+@@ -929,7 +911,7 @@ static void pt_event_start(struct perf_event *event, int mode)
return;
}
@@ -21934,7 +24447,7 @@ index 123ff1b..d53e500 100644
event->hw.state = 0;
pt_config_buffer(buf->cur->table, buf->cur_idx,
-@@ -945,7 +927,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
+@@ -946,7 +928,7 @@ static void pt_event_stop(struct perf_event *event, int mode)
* Protect against the PMI racing with disabling wrmsr,
* see comment in intel_pt_interrupt().
*/
@@ -21944,10 +24457,10 @@ index 123ff1b..d53e500 100644
if (event->hw.state == PERF_HES_STOPPED)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-index 358c54a..f068235 100644
+index 5cbd4e6..ee9388a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
-@@ -487,7 +487,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
+@@ -486,7 +486,7 @@ static struct attribute *rapl_events_hsw_attr[] = {
NULL,
};
@@ -21957,10 +24470,10 @@ index 358c54a..f068235 100644
.attrs = NULL, /* patched at runtime */
};
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-index 90b7c50..7863ae3 100644
+index 21b5e38..84f1f82 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
-@@ -732,7 +732,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
+@@ -731,7 +731,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
static int __init uncore_type_init(struct intel_uncore_type *type)
{
struct intel_uncore_pmu *pmus;
@@ -21970,7 +24483,7 @@ index 90b7c50..7863ae3 100644
int i, j;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
-index ceac8f5..a562de7 100644
+index 0f77f0a..d3c6b7d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -115,7 +115,7 @@ struct intel_uncore_box {
@@ -22375,10 +24888,10 @@ index 5f1c626..1cba97e 100644
+EXPORT_SYMBOL(pax_check_alloca);
+#endif
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
-index e2ce85d..00ccad0 100644
+index a102564..d1f0b73 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
-@@ -802,8 +802,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
+@@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
static void early_panic(char *msg)
{
@@ -22390,7 +24903,7 @@ index e2ce85d..00ccad0 100644
static int userdef __initdata;
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
-index 89427d8..00c0d52 100644
+index eec40f5..4fee808 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -7,6 +7,7 @@
@@ -22401,1864 +24914,471 @@ index 89427d8..00c0d52 100644
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
-diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
-index 1c30976..71b41b9 100644
---- a/arch/x86/kernel/entry_32.S
-+++ b/arch/x86/kernel/entry_32.S
-@@ -177,13 +177,154 @@
- /*CFI_REL_OFFSET gs, PT_GS*/
- .endm
- .macro SET_KERNEL_GS reg
-+
-+#ifdef CONFIG_CC_STACKPROTECTOR
- movl $(__KERNEL_STACK_CANARY), \reg
-+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
-+ movl $(__USER_DS), \reg
-+#else
-+ xorl \reg, \reg
-+#endif
-+
- movl \reg, %gs
- .endm
-
- #endif /* CONFIG_X86_32_LAZY_GS */
-
--.macro SAVE_ALL
-+.macro pax_enter_kernel
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_enter_kernel
-+#endif
-+.endm
-+
-+.macro pax_exit_kernel
-+#ifdef CONFIG_PAX_KERNEXEC
-+ call pax_exit_kernel
-+#endif
-+.endm
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ENTRY(pax_enter_kernel)
-+#ifdef CONFIG_PARAVIRT
-+ pushl %eax
-+ pushl %ecx
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
-+ mov %eax, %esi
-+#else
-+ mov %cr0, %esi
-+#endif
-+ bts $X86_CR0_WP_BIT, %esi
-+ jnc 1f
-+ mov %cs, %esi
-+ cmp $__KERNEL_CS, %esi
-+ jz 3f
-+ ljmp $__KERNEL_CS, $3f
-+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
-+2:
-+#ifdef CONFIG_PARAVIRT
-+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
-+#else
-+ mov %esi, %cr0
-+#endif
-+3:
-+#ifdef CONFIG_PARAVIRT
-+ popl %ecx
-+ popl %eax
-+#endif
-+ ret
-+ENDPROC(pax_enter_kernel)
-+
-+ENTRY(pax_exit_kernel)
-+#ifdef CONFIG_PARAVIRT
-+ pushl %eax
-+ pushl %ecx
-+#endif
-+ mov %cs, %esi
-+ cmp $__KERNEXEC_KERNEL_CS, %esi
-+ jnz 2f
-+#ifdef CONFIG_PARAVIRT
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
-+ mov %eax, %esi
-+#else
-+ mov %cr0, %esi
-+#endif
-+ btr $X86_CR0_WP_BIT, %esi
-+ ljmp $__KERNEL_CS, $1f
-+1:
-+#ifdef CONFIG_PARAVIRT
-+ mov %esi, %eax
-+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
-+#else
-+ mov %esi, %cr0
-+#endif
-+2:
-+#ifdef CONFIG_PARAVIRT
-+ popl %ecx
-+ popl %eax
-+#endif
-+ ret
-+ENDPROC(pax_exit_kernel)
-+#endif
-+
-+ .macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+ .endm
-+
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+/*
-+ * ebp: thread_info
-+ */
-+ENTRY(pax_erase_kstack)
-+ pushl %edi
-+ pushl %ecx
-+ pushl %eax
-+
-+ mov TI_lowest_stack(%ebp), %edi
-+ mov $-0xBEEF, %eax
-+ std
-+
-+1: mov %edi, %ecx
-+ and $THREAD_SIZE_asm - 1, %ecx
-+ shr $2, %ecx
-+ repne scasl
-+ jecxz 2f
-+
-+ cmp $2*16, %ecx
-+ jc 2f
-+
-+ mov $2*16, %ecx
-+ repe scasl
-+ jecxz 2f
-+ jne 1b
-+
-+2: cld
-+ or $2*4, %edi
-+ mov %esp, %ecx
-+ sub %edi, %ecx
-+
-+ cmp $THREAD_SIZE_asm, %ecx
-+ jb 3f
-+ ud2
-+3:
-+
-+ shr $2, %ecx
-+ rep stosl
-+
-+ mov TI_task_thread_sp0(%ebp), %edi
-+ sub $128, %edi
-+ mov %edi, TI_lowest_stack(%ebp)
-+
-+ popl %eax
-+ popl %ecx
-+ popl %edi
-+ ret
-+ENDPROC(pax_erase_kstack)
-+#endif
-+
-+.macro __SAVE_ALL _DS
- cld
- PUSH_GS
- pushl_cfi %fs
-@@ -206,7 +347,7 @@
- CFI_REL_OFFSET ecx, 0
- pushl_cfi %ebx
- CFI_REL_OFFSET ebx, 0
-- movl $(__USER_DS), %edx
-+ movl $\_DS, %edx
- movl %edx, %ds
- movl %edx, %es
- movl $(__KERNEL_PERCPU), %edx
-@@ -214,6 +355,15 @@
- SET_KERNEL_GS %edx
- .endm
-
-+.macro SAVE_ALL
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ __SAVE_ALL __KERNEL_DS
-+ pax_enter_kernel
-+#else
-+ __SAVE_ALL __USER_DS
-+#endif
-+.endm
-+
- .macro RESTORE_INT_REGS
- popl_cfi %ebx
- CFI_RESTORE ebx
-@@ -297,7 +447,7 @@ ENTRY(ret_from_fork)
- popfl_cfi
- jmp syscall_exit
- CFI_ENDPROC
--END(ret_from_fork)
-+ENDPROC(ret_from_fork)
-
- ENTRY(ret_from_kernel_thread)
- CFI_STARTPROC
-@@ -340,7 +490,15 @@ ret_from_intr:
- andl $SEGMENT_RPL_MASK, %eax
- #endif
- cmpl $USER_RPL, %eax
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ jae resume_userspace
-+
-+ pax_exit_kernel
-+ jmp resume_kernel
-+#else
- jb resume_kernel # not returning to v8086 or userspace
-+#endif
-
- ENTRY(resume_userspace)
- LOCKDEP_SYS_EXIT
-@@ -352,8 +510,8 @@ ENTRY(resume_userspace)
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
- # int/exception return?
- jne work_pending
-- jmp restore_all
--END(ret_from_exception)
-+ jmp restore_all_pax
-+ENDPROC(ret_from_exception)
-
- #ifdef CONFIG_PREEMPT
- ENTRY(resume_kernel)
-@@ -365,7 +523,7 @@ need_resched:
- jz restore_all
- call preempt_schedule_irq
- jmp need_resched
--END(resume_kernel)
-+ENDPROC(resume_kernel)
- #endif
- CFI_ENDPROC
-
-@@ -395,33 +553,45 @@ sysenter_past_esp:
- /*CFI_REL_OFFSET cs, 0*/
- /*
- * Push current_thread_info()->sysenter_return to the stack.
-- * A tiny bit of offset fixup is necessary: TI_sysenter_return
-- * is relative to thread_info, which is at the bottom of the
-- * kernel stack page. 4*4 means the 4 words pushed above;
-- * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
-- * and THREAD_SIZE takes us to the bottom.
- */
-- pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-+ pushl_cfi $0
- CFI_REL_OFFSET eip, 0
-
- pushl_cfi %eax
- SAVE_ALL
-+ GET_THREAD_INFO(%ebp)
-+ movl TI_sysenter_return(%ebp),%ebp
-+ movl %ebp,PT_EIP(%esp)
- ENABLE_INTERRUPTS(CLBR_NONE)
+diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
+index ce95676..da8c6ff 100644
+--- a/arch/x86/kernel/espfix_64.c
++++ b/arch/x86/kernel/espfix_64.c
+@@ -41,6 +41,7 @@
+ #include <asm/pgalloc.h>
+ #include <asm/setup.h>
+ #include <asm/espfix.h>
++#include <asm/bug.h>
/*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
-+ movl PT_OLDESP(%esp),%ebp
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ mov PT_OLDSS(%esp),%ds
-+1: movl %ds:(%ebp),%ebp
-+ push %ss
-+ pop %ds
-+#else
- cmpl $__PAGE_OFFSET-3,%ebp
- jae syscall_fault
- ASM_STAC
- 1: movl (%ebp),%ebp
- ASM_CLAC
-+#endif
-+
- movl %ebp,PT_EBP(%esp)
- _ASM_EXTABLE(1b,syscall_fault)
-
- GET_THREAD_INFO(%ebp)
-
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
- jnz sysenter_audit
- sysenter_do_call:
-@@ -437,12 +607,24 @@ sysenter_after_call:
- testl $_TIF_ALLWORK_MASK, %ecx
- jnz sysexit_audit
- sysenter_exit:
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushl_cfi %eax
-+ movl %esp, %eax
-+ call pax_randomize_kstack
-+ popl_cfi %eax
-+#endif
-+
-+ pax_erase_kstack
-+
- /* if something modifies registers it must also disable sysexit */
- movl PT_EIP(%esp), %edx
- movl PT_OLDESP(%esp), %ecx
- xorl %ebp,%ebp
- TRACE_IRQS_ON
- 1: mov PT_FS(%esp), %fs
-+2: mov PT_DS(%esp), %ds
-+3: mov PT_ES(%esp), %es
- PTGS_TO_GS
- ENABLE_INTERRUPTS_SYSEXIT
+ * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
+@@ -70,8 +71,10 @@ static DEFINE_MUTEX(espfix_init_mutex);
+ #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
+ static void *espfix_pages[ESPFIX_MAX_PAGES];
-@@ -456,6 +638,9 @@ sysenter_audit:
- pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
- pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
- call __audit_syscall_entry
-+
-+ pax_erase_kstack
-+
- popl_cfi %ecx /* get that remapped edx off the stack */
- popl_cfi %ecx /* get that remapped esi off the stack */
- movl PT_EAX(%esp),%eax /* reload syscall number */
-@@ -482,10 +667,16 @@ sysexit_audit:
-
- CFI_ENDPROC
- .pushsection .fixup,"ax"
--2: movl $0,PT_FS(%esp)
-+4: movl $0,PT_FS(%esp)
-+ jmp 1b
-+5: movl $0,PT_DS(%esp)
-+ jmp 1b
-+6: movl $0,PT_ES(%esp)
- jmp 1b
- .popsection
-- _ASM_EXTABLE(1b,2b)
-+ _ASM_EXTABLE(1b,4b)
-+ _ASM_EXTABLE(2b,5b)
-+ _ASM_EXTABLE(3b,6b)
- PTGS_TO_GS_EX
- ENDPROC(ia32_sysenter_target)
+-static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
+- __aligned(PAGE_SIZE);
++static __page_aligned_rodata pud_t espfix_pud_page[PTRS_PER_PUD];
++static __page_aligned_rodata pmd_t espfix_pmd_page[PTRS_PER_PMD];
++static __page_aligned_rodata pte_t espfix_pte_page[PTRS_PER_PTE];
++static __page_aligned_rodata char espfix_stack_page[ESPFIX_MAX_PAGES][PAGE_SIZE];
-@@ -496,6 +687,11 @@ ENTRY(system_call)
- pushl_cfi %eax # save orig_eax
- SAVE_ALL
- GET_THREAD_INFO(%ebp)
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
- # system call tracing in operation / emulation
- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
- jnz syscall_trace_entry
-@@ -515,6 +711,15 @@ syscall_exit:
- testl $_TIF_ALLWORK_MASK, %ecx # current->work
- jnz syscall_exit_work
+ static unsigned int page_random, slot_random;
-+restore_all_pax:
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ movl %esp, %eax
-+ call pax_randomize_kstack
-+#endif
-+
-+ pax_erase_kstack
-+
- restore_all:
- TRACE_IRQS_IRET
- restore_all_notrace:
-@@ -569,14 +774,34 @@ ldt_ss:
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that matches for the difference.
- */
--#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
- mov %esp, %edx /* load kernel esp */
- mov PT_OLDESP(%esp), %eax /* load userspace esp */
- mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
-+#ifdef CONFIG_SMP
-+ movl PER_CPU_VAR(cpu_number), %ebx
-+ shll $PAGE_SHIFT_asm, %ebx
-+ addl $cpu_gdt_table, %ebx
-+#else
-+ movl $cpu_gdt_table, %ebx
-+#endif
- shr $16, %edx
-- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
-- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mov %cr0, %esi
-+ btr $X86_CR0_WP_BIT, %esi
-+ mov %esi, %cr0
-+#endif
-+
-+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
-+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ bts $X86_CR0_WP_BIT, %esi
-+ mov %esi, %cr0
-+#endif
-+
- pushl_cfi $__ESPFIX_SS
- pushl_cfi %eax /* new kernel esp */
- /* Disable interrupts, but do not irqtrace this section: we
-@@ -606,20 +831,18 @@ work_resched:
- movl TI_flags(%ebp), %ecx
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
-- jz restore_all
-+ jz restore_all_pax
- testb $_TIF_NEED_RESCHED, %cl
- jnz work_resched
-
- work_notifysig: # deal with pending signals and
- # notify-resume requests
-+ movl %esp, %eax
- #ifdef CONFIG_VM86
- testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-- movl %esp, %eax
- jnz work_notifysig_v86 # returning to kernel-space or
- # vm86-space
- 1:
--#else
-- movl %esp, %eax
- #endif
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
-@@ -640,7 +863,7 @@ work_notifysig_v86:
- movl %eax, %esp
- jmp 1b
- #endif
--END(work_pending)
-+ENDPROC(work_pending)
+@@ -122,14 +125,25 @@ static void init_espfix_random(void)
+ void __init init_espfix_bsp(void)
+ {
+ pgd_t *pgd_p;
++ pud_t *pud_p;
++ unsigned long addr, index = pgd_index(ESPFIX_BASE_ADDR);
- # perform syscall exit tracing
- ALIGN
-@@ -648,11 +871,14 @@ syscall_trace_entry:
- movl $-ENOSYS,PT_EAX(%esp)
- movl %esp, %eax
- call syscall_trace_enter
-+
-+ pax_erase_kstack
+ /* Install the espfix pud into the kernel page directory */
+- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
+- pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
++ pgd_p = &init_level4_pgt[index];
++ pud_p = espfix_pud_page;
++ paravirt_alloc_pud(&init_mm, __pa(pud_p) >> PAGE_SHIFT);
++ set_pgd(pgd_p, __pgd(PGTABLE_PROT | __pa(pud_p)));
+
- /* What it returned is what we'll actually use. */
- cmpl $(NR_syscalls), %eax
- jnae syscall_call
- jmp syscall_exit
--END(syscall_trace_entry)
-+ENDPROC(syscall_trace_entry)
-
- # perform syscall exit tracing
- ALIGN
-@@ -665,26 +891,30 @@ syscall_exit_work:
- movl %esp, %eax
- call syscall_trace_leave
- jmp resume_userspace
--END(syscall_exit_work)
-+ENDPROC(syscall_exit_work)
- CFI_ENDPROC
-
- RING0_INT_FRAME # can't unwind into user space anyway
- syscall_fault:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ push %ss
-+ pop %ds
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
++ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
+#endif
- ASM_CLAC
- GET_THREAD_INFO(%ebp)
- movl $-EFAULT,PT_EAX(%esp)
- jmp resume_userspace
--END(syscall_fault)
-+ENDPROC(syscall_fault)
-
- syscall_badsys:
- movl $-ENOSYS,%eax
- jmp syscall_after_call
--END(syscall_badsys)
-+ENDPROC(syscall_badsys)
-
- sysenter_badsys:
- movl $-ENOSYS,%eax
- jmp sysenter_after_call
--END(sysenter_badsys)
-+ENDPROC(sysenter_badsys)
- CFI_ENDPROC
-
- .macro FIXUP_ESPFIX_STACK
-@@ -697,8 +927,15 @@ END(sysenter_badsys)
- */
- #ifdef CONFIG_X86_ESPFIX32
- /* fixup the stack */
-- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
-- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
-+#ifdef CONFIG_SMP
-+ movl PER_CPU_VAR(cpu_number), %ebx
-+ shll $PAGE_SHIFT_asm, %ebx
-+ addl $cpu_gdt_table, %ebx
-+#else
-+ movl $cpu_gdt_table, %ebx
-+#endif
-+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
-+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
- shl $16, %eax
- addl %esp, %eax /* the adjusted stack pointer */
- pushl_cfi $__KERNEL_DS
-@@ -737,7 +974,7 @@ ENTRY(irq_entries_start)
- CFI_ADJUST_CFA_OFFSET -4
- .align 8
- .endr
--END(irq_entries_start)
-+ENDPROC(irq_entries_start)
-
- /*
- * the CPU automatically disables interrupts when executing an IRQ vector,
-@@ -790,7 +1027,7 @@ ENTRY(coprocessor_error)
- pushl_cfi $do_coprocessor_error
- jmp error_code
- CFI_ENDPROC
--END(coprocessor_error)
-+ENDPROC(coprocessor_error)
-
- ENTRY(simd_coprocessor_error)
- RING0_INT_FRAME
-@@ -806,7 +1043,7 @@ ENTRY(simd_coprocessor_error)
- #endif
- jmp error_code
- CFI_ENDPROC
--END(simd_coprocessor_error)
-+ENDPROC(simd_coprocessor_error)
-
- ENTRY(device_not_available)
- RING0_INT_FRAME
-@@ -815,18 +1052,18 @@ ENTRY(device_not_available)
- pushl_cfi $do_device_not_available
- jmp error_code
- CFI_ENDPROC
--END(device_not_available)
-+ENDPROC(device_not_available)
-
- #ifdef CONFIG_PARAVIRT
- ENTRY(native_iret)
- iret
- _ASM_EXTABLE(native_iret, iret_exc)
--END(native_iret)
-+ENDPROC(native_iret)
-
- ENTRY(native_irq_enable_sysexit)
- sti
- sysexit
--END(native_irq_enable_sysexit)
-+ENDPROC(native_irq_enable_sysexit)
- #endif
-
- ENTRY(overflow)
-@@ -836,7 +1073,7 @@ ENTRY(overflow)
- pushl_cfi $do_overflow
- jmp error_code
- CFI_ENDPROC
--END(overflow)
-+ENDPROC(overflow)
-
- ENTRY(bounds)
- RING0_INT_FRAME
-@@ -845,7 +1082,7 @@ ENTRY(bounds)
- pushl_cfi $do_bounds
- jmp error_code
- CFI_ENDPROC
--END(bounds)
-+ENDPROC(bounds)
-
- ENTRY(invalid_op)
- RING0_INT_FRAME
-@@ -854,7 +1091,7 @@ ENTRY(invalid_op)
- pushl_cfi $do_invalid_op
- jmp error_code
- CFI_ENDPROC
--END(invalid_op)
-+ENDPROC(invalid_op)
-
- ENTRY(coprocessor_segment_overrun)
- RING0_INT_FRAME
-@@ -863,7 +1100,7 @@ ENTRY(coprocessor_segment_overrun)
- pushl_cfi $do_coprocessor_segment_overrun
- jmp error_code
- CFI_ENDPROC
--END(coprocessor_segment_overrun)
-+ENDPROC(coprocessor_segment_overrun)
-
- ENTRY(invalid_TSS)
- RING0_EC_FRAME
-@@ -871,7 +1108,7 @@ ENTRY(invalid_TSS)
- pushl_cfi $do_invalid_TSS
- jmp error_code
- CFI_ENDPROC
--END(invalid_TSS)
-+ENDPROC(invalid_TSS)
-
- ENTRY(segment_not_present)
- RING0_EC_FRAME
-@@ -879,7 +1116,7 @@ ENTRY(segment_not_present)
- pushl_cfi $do_segment_not_present
- jmp error_code
- CFI_ENDPROC
--END(segment_not_present)
-+ENDPROC(segment_not_present)
-
- ENTRY(stack_segment)
- RING0_EC_FRAME
-@@ -887,7 +1124,7 @@ ENTRY(stack_segment)
- pushl_cfi $do_stack_segment
- jmp error_code
- CFI_ENDPROC
--END(stack_segment)
-+ENDPROC(stack_segment)
-
- ENTRY(alignment_check)
- RING0_EC_FRAME
-@@ -895,7 +1132,7 @@ ENTRY(alignment_check)
- pushl_cfi $do_alignment_check
- jmp error_code
- CFI_ENDPROC
--END(alignment_check)
-+ENDPROC(alignment_check)
- ENTRY(divide_error)
- RING0_INT_FRAME
-@@ -904,7 +1141,7 @@ ENTRY(divide_error)
- pushl_cfi $do_divide_error
- jmp error_code
- CFI_ENDPROC
--END(divide_error)
-+ENDPROC(divide_error)
-
- #ifdef CONFIG_X86_MCE
- ENTRY(machine_check)
-@@ -914,7 +1151,7 @@ ENTRY(machine_check)
- pushl_cfi machine_check_vector
- jmp error_code
- CFI_ENDPROC
--END(machine_check)
-+ENDPROC(machine_check)
- #endif
-
- ENTRY(spurious_interrupt_bug)
-@@ -924,7 +1161,7 @@ ENTRY(spurious_interrupt_bug)
- pushl_cfi $do_spurious_interrupt_bug
- jmp error_code
- CFI_ENDPROC
--END(spurious_interrupt_bug)
-+ENDPROC(spurious_interrupt_bug)
-
- #ifdef CONFIG_XEN
- /* Xen doesn't set %esp to be precisely what the normal sysenter
-@@ -1033,7 +1270,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
-
- ENTRY(mcount)
- ret
--END(mcount)
-+ENDPROC(mcount)
-
- ENTRY(ftrace_caller)
- pushl %eax
-@@ -1063,7 +1300,7 @@ ftrace_graph_call:
- .globl ftrace_stub
- ftrace_stub:
- ret
--END(ftrace_caller)
-+ENDPROC(ftrace_caller)
-
- ENTRY(ftrace_regs_caller)
- pushf /* push flags before compare (in cs location) */
-@@ -1161,7 +1398,7 @@ trace:
- popl %ecx
- popl %eax
- jmp ftrace_stub
--END(mcount)
-+ENDPROC(mcount)
- #endif /* CONFIG_DYNAMIC_FTRACE */
- #endif /* CONFIG_FUNCTION_TRACER */
-
-@@ -1179,7 +1416,7 @@ ENTRY(ftrace_graph_caller)
- popl %ecx
- popl %eax
- ret
--END(ftrace_graph_caller)
-+ENDPROC(ftrace_graph_caller)
-
- .globl return_to_handler
- return_to_handler:
-@@ -1233,15 +1470,18 @@ error_code:
- movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
- REG_TO_PTGS %ecx
- SET_KERNEL_GS %ecx
-- movl $(__USER_DS), %ecx
-+ movl $(__KERNEL_DS), %ecx
- movl %ecx, %ds
- movl %ecx, %es
-+
-+ pax_enter_kernel
-+
- TRACE_IRQS_OFF
- movl %esp,%eax # pt_regs pointer
- call *%edi
- jmp ret_from_exception
- CFI_ENDPROC
--END(page_fault)
-+ENDPROC(page_fault)
-
- /*
- * Debug traps and NMI can happen at the one SYSENTER instruction
-@@ -1284,7 +1524,7 @@ debug_stack_correct:
- call do_debug
- jmp ret_from_exception
- CFI_ENDPROC
--END(debug)
-+ENDPROC(debug)
-
- /*
- * NMI is doubly nasty. It can happen _while_ we're handling
-@@ -1324,6 +1564,9 @@ nmi_stack_correct:
- xorl %edx,%edx # zero error code
- movl %esp,%eax # pt_regs pointer
- call do_nmi
-+
-+ pax_exit_kernel
-+
- jmp restore_all_notrace
- CFI_ENDPROC
+ /* Randomize the locations */
+ init_espfix_random();
-@@ -1361,13 +1604,16 @@ nmi_espfix_stack:
- FIXUP_ESPFIX_STACK # %eax == %esp
- xorl %edx,%edx # zero error code
- call do_nmi
++ addr = espfix_base_addr(0);
+
-+ pax_exit_kernel
-+
- RESTORE_REGS
- lss 12+4(%esp), %esp # back to espfix stack
- CFI_ADJUST_CFA_OFFSET -24
- jmp irq_return
- #endif
- CFI_ENDPROC
--END(nmi)
-+ENDPROC(nmi)
-
- ENTRY(int3)
- RING0_INT_FRAME
-@@ -1380,14 +1626,14 @@ ENTRY(int3)
- call do_int3
- jmp ret_from_exception
- CFI_ENDPROC
--END(int3)
-+ENDPROC(int3)
-
- ENTRY(general_protection)
- RING0_EC_FRAME
- pushl_cfi $do_general_protection
- jmp error_code
- CFI_ENDPROC
--END(general_protection)
-+ENDPROC(general_protection)
-
- #ifdef CONFIG_KVM_GUEST
- ENTRY(async_page_fault)
-@@ -1396,6 +1642,6 @@ ENTRY(async_page_fault)
- pushl_cfi $do_async_page_fault
- jmp error_code
- CFI_ENDPROC
--END(async_page_fault)
-+ENDPROC(async_page_fault)
- #endif
+ /* The rest is the same as for any other processor */
+ init_espfix_ap(0);
+ }
+@@ -170,35 +184,39 @@ void init_espfix_ap(int cpu)
+ pud_p = &espfix_pud_page[pud_index(addr)];
+ pud = *pud_p;
+ if (!pud_present(pud)) {
+- struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+-
+- pmd_p = (pmd_t *)page_address(page);
++ if (cpu)
++ pmd_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
++ else
++ pmd_p = espfix_pmd_page;
+ pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PUD_CLONES; n++)
+ set_pud(&pud_p[n], pud);
+- }
++ } else
++ BUG_ON(!cpu);
-diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
-index 4bd6c19..a0eba01 100644
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -46,6 +46,8 @@
- #include <asm/smap.h>
- #include <asm/pgtable_types.h>
- #include <linux/err.h>
-+#include <asm/pgtable.h>
-+#include <asm/alternative-asm.h>
+ pmd_p = pmd_offset(&pud, addr);
+ pmd = *pmd_p;
+ if (!pmd_present(pmd)) {
+- struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+-
+- pte_p = (pte_t *)page_address(page);
++ if (cpu)
++ pte_p = page_address(alloc_pages_node(node, PGALLOC_GFP, 0));
++ else
++ pte_p = espfix_pte_page;
+ pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
+ paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+ for (n = 0; n < ESPFIX_PMD_CLONES; n++)
+ set_pmd(&pmd_p[n], pmd);
+- }
++ } else
++ BUG_ON(!cpu);
- /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
- #include <linux/elf-em.h>
-@@ -64,6 +66,401 @@ ENTRY(native_usergs_sysret64)
- ENDPROC(native_usergs_sysret64)
- #endif /* CONFIG_PARAVIRT */
+ pte_p = pte_offset_kernel(&pmd, addr);
+- stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
++ stack_page = espfix_stack_page[page];
+ pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+ for (n = 0; n < ESPFIX_PTE_CLONES; n++)
+ set_pte(&pte_p[n*PTE_STRIDE], pte);
-+ .macro ljmpq sel, off
-+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
-+ .byte 0x48; ljmp *1234f(%rip)
-+ .pushsection .rodata
-+ .align 16
-+ 1234: .quad \off; .word \sel
-+ .popsection
-+#else
-+ pushq $\sel
-+ pushq $\off
-+ lretq
-+#endif
-+ .endm
-+
-+ .macro pax_enter_kernel
-+ pax_set_fptr_mask
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ call pax_enter_kernel
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ call pax_exit_kernel
-+#endif
-+ .endm
-+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ENTRY(pax_enter_kernel)
-+ pushq %rdi
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ bts $X86_CR0_WP_BIT,%rdi
-+ jnc 3f
-+ mov %cs,%edi
-+ cmp $__KERNEL_CS,%edi
-+ jnz 2f
-+1:
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
-+ GET_CR3_INTO_RDI
-+ cmp $0,%dil
-+ jnz 112f
-+ mov $__KERNEL_DS,%edi
-+ mov %edi,%ss
-+ jmp 111f
-+112: cmp $1,%dil
-+ jz 113f
-+ ud2
-+113: sub $4097,%rdi
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ mov $__UDEREF_KERNEL_DS,%edi
-+ mov %edi,%ss
-+111:
-+#endif
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+2: ljmpq __KERNEL_CS,1b
-+3: ljmpq __KERNEXEC_KERNEL_CS,4f
-+4: SET_RDI_INTO_CR0
-+ jmp 1b
-+#endif
-+ENDPROC(pax_enter_kernel)
-+
-+ENTRY(pax_exit_kernel)
-+ pushq %rdi
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ mov %cs,%rdi
-+ cmp $__KERNEXEC_KERNEL_CS,%edi
-+ jz 2f
-+ GET_CR0_INTO_RDI
-+ bts $X86_CR0_WP_BIT,%rdi
-+ jnc 4f
-+1:
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
-+ mov %ss,%edi
-+ cmp $__UDEREF_KERNEL_DS,%edi
-+ jnz 111f
-+ GET_CR3_INTO_RDI
-+ cmp $0,%dil
-+ jz 112f
-+ ud2
-+112: add $4097,%rdi
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ mov $__KERNEL_DS,%edi
-+ mov %edi,%ss
-+111:
-+#endif
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI);
-+#endif
-+
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+2: GET_CR0_INTO_RDI
-+ btr $X86_CR0_WP_BIT,%rdi
-+ jnc 4f
-+ ljmpq __KERNEL_CS,3f
-+3: SET_RDI_INTO_CR0
-+ jmp 1b
-+4: ud2
-+ jmp 4b
-+#endif
-+ENDPROC(pax_exit_kernel)
-+#endif
-+
-+ .macro pax_enter_kernel_user
-+ pax_set_fptr_mask
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_enter_kernel_user
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel_user
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ call pax_exit_kernel_user
-+#endif
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pushq %rax
-+ pushq %r11
-+ call pax_randomize_kstack
-+ popq %r11
-+ popq %rax
-+#endif
-+ .endm
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ENTRY(pax_enter_kernel_user)
-+ pushq %rdi
-+ pushq %rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
-+ GET_CR3_INTO_RDI
-+ cmp $1,%dil
-+ jnz 4f
-+ sub $4097,%rdi
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ jmp 3f
-+111:
-+
-+ GET_CR3_INTO_RDI
-+ mov %rdi,%rbx
-+ add $__START_KERNEL_map,%rbx
-+ sub phys_base(%rip),%rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ cmpl $0, pv_info+PARAVIRT_enabled
-+ jz 1f
-+ pushq %rdi
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ mov i*8(%rbx),%rsi
-+ mov $0,%sil
-+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
-+ i = i + 1
-+ .endr
-+ popq %rdi
-+ jmp 2f
-+1:
-+#endif
-+
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ movb $0,i*8(%rbx)
-+ i = i + 1
-+ .endr
-+
-+2: SET_RDI_INTO_CR3
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ bts $X86_CR0_WP_BIT,%rdi
-+ SET_RDI_INTO_CR0
-+#endif
-+
-+3:
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rbx
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+4: ud2
-+ENDPROC(pax_enter_kernel_user)
-+
-+ENTRY(pax_exit_kernel_user)
-+ pushq %rdi
-+ pushq %rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_SAVE_REGS(CLBR_RDI)
-+#endif
-+
-+ GET_CR3_INTO_RDI
-+ ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID
-+ cmp $0,%dil
-+ jnz 3f
-+ add $4097,%rdi
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ jmp 2f
-+1:
-+
-+ mov %rdi,%rbx
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ btr $X86_CR0_WP_BIT,%rdi
-+ jnc 3f
-+ SET_RDI_INTO_CR0
-+#endif
-+
-+ add $__START_KERNEL_map,%rbx
-+ sub phys_base(%rip),%rbx
-+
-+#ifdef CONFIG_PARAVIRT
-+ cmpl $0, pv_info+PARAVIRT_enabled
-+ jz 1f
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ mov i*8(%rbx),%rsi
-+ mov $0x67,%sil
-+ lea i*8(%rbx),%rdi
-+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
-+ i = i + 1
-+ .endr
-+ jmp 2f
-+1:
-+#endif
-+
-+ i = 0
-+ .rept USER_PGD_PTRS
-+ movb $0x67,i*8(%rbx)
-+ i = i + 1
-+ .endr
-+2:
-+
-+#ifdef CONFIG_PARAVIRT
-+ PV_RESTORE_REGS(CLBR_RDI)
-+#endif
-+
-+ popq %rbx
-+ popq %rdi
-+ pax_force_retaddr
-+ retq
-+3: ud2
-+ENDPROC(pax_exit_kernel_user)
-+#endif
-+
-+ .macro pax_enter_kernel_nmi
-+ pax_set_fptr_mask
-+
-+#ifdef CONFIG_PAX_KERNEXEC
-+ GET_CR0_INTO_RDI
-+ bts $X86_CR0_WP_BIT,%rdi
-+ jc 110f
-+ SET_RDI_INTO_CR0
-+ or $2,%ebx
-+110:
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
-+ GET_CR3_INTO_RDI
-+ cmp $0,%dil
-+ jz 111f
-+ sub $4097,%rdi
-+ or $4,%ebx
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ mov $__UDEREF_KERNEL_DS,%edi
-+ mov %edi,%ss
-+111:
-+#endif
-+ .endm
-+
-+ .macro pax_exit_kernel_nmi
-+#ifdef CONFIG_PAX_KERNEXEC
-+ btr $1,%ebx
-+ jnc 110f
-+ GET_CR0_INTO_RDI
-+ btr $X86_CR0_WP_BIT,%rdi
-+ SET_RDI_INTO_CR0
-+110:
-+#endif
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
-+ btr $2,%ebx
-+ jnc 111f
-+ GET_CR3_INTO_RDI
-+ add $4097,%rdi
-+ bts $63,%rdi
-+ SET_RDI_INTO_CR3
-+ mov $__KERNEL_DS,%edi
-+ mov %edi,%ss
-+111:
-+#endif
-+ .endm
-+
-+ .macro pax_erase_kstack
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ call pax_erase_kstack
-+#endif
-+ .endm
-+
-+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
-+ENTRY(pax_erase_kstack)
-+ pushq %rdi
-+ pushq %rcx
-+ pushq %rax
-+ pushq %r11
-+
-+ GET_THREAD_INFO(%r11)
-+ mov TI_lowest_stack(%r11), %rdi
-+ mov $-0xBEEF, %rax
-+ std
-+
-+1: mov %edi, %ecx
-+ and $THREAD_SIZE_asm - 1, %ecx
-+ shr $3, %ecx
-+ repne scasq
-+ jecxz 2f
-+
-+ cmp $2*8, %ecx
-+ jc 2f
-+
-+ mov $2*8, %ecx
-+ repe scasq
-+ jecxz 2f
-+ jne 1b
-+
-+2: cld
-+ or $2*8, %rdi
-+ mov %esp, %ecx
-+ sub %edi, %ecx
-+
-+ cmp $THREAD_SIZE_asm, %rcx
-+ jb 3f
-+ ud2
-+3:
-+
-+ shr $3, %ecx
-+ rep stosq
-+
-+ mov TI_task_thread_sp0(%r11), %rdi
-+ sub $256, %rdi
-+ mov %rdi, TI_lowest_stack(%r11)
-+
-+ popq %r11
-+ popq %rax
-+ popq %rcx
-+ popq %rdi
-+ pax_force_retaddr
-+ ret
-+ENDPROC(pax_erase_kstack)
-+#endif
+ /* Job is done for this CPU and any CPU which shares this page */
+- ACCESS_ONCE(espfix_pages[page]) = stack_page;
++ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
- .macro TRACE_IRQS_IRETQ
- #ifdef CONFIG_TRACE_IRQFLAGS
-@@ -100,7 +497,7 @@ ENDPROC(native_usergs_sysret64)
- .endm
+ unlock_done:
+ mutex_unlock(&espfix_init_mutex);
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index d25097c..84b0d51 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
+ struct fpu *fpu = &current->thread.fpu;
+
+ if (fpu->fpregs_active)
+- copy_kernel_to_fpregs(&fpu->state);
++ copy_kernel_to_fpregs(fpu->state);
+ else
+ __fpregs_deactivate_hw();
- .macro TRACE_IRQS_IRETQ_DEBUG
-- bt $9,EFLAGS(%rsp) /* interrupts off? */
-+ bt $X86_EFLAGS_IF_BIT,EFLAGS(%rsp) /* interrupts off? */
- jnc 1f
- TRACE_IRQS_ON_DEBUG
- 1:
-@@ -221,14 +618,6 @@ GLOBAL(system_call_after_swapgs)
- /* Construct struct pt_regs on stack */
- pushq_cfi $__USER_DS /* pt_regs->ss */
- pushq_cfi PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
-- /*
-- * Re-enable interrupts.
-- * We use 'rsp_scratch' as a scratch space, hence irq-off block above
-- * must execute atomically in the face of possible interrupt-driven
-- * task preemption. We must enable interrupts only after we're done
-- * with using rsp_scratch:
-- */
-- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %r11 /* pt_regs->flags */
- pushq_cfi $__USER_CS /* pt_regs->cs */
- pushq_cfi %rcx /* pt_regs->ip */
-@@ -246,7 +635,27 @@ GLOBAL(system_call_after_swapgs)
- sub $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
- CFI_ADJUST_CFA_OFFSET 6*8
-
-- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
-+ movq_cfi r12, R12
-+#endif
-+
-+ pax_enter_kernel_user
-+
-+#ifdef CONFIG_PAX_RANDKSTACK
-+ pax_erase_kstack
-+#endif
-+
-+ /*
-+ * Re-enable interrupts.
-+ * We use 'rsp_scratch' as a scratch space, hence irq-off block above
-+ * must execute atomically in the face of possible interrupt-driven
-+ * task preemption. We must enable interrupts only after we're done
-+ * with using rsp_scratch:
-+ */
-+ ENABLE_INTERRUPTS(CLBR_NONE)
-+
-+ GET_THREAD_INFO(%rcx)
-+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
- jnz tracesys
- system_call_fastpath:
- #if __SYSCALL_MASK == ~0
-@@ -279,10 +688,13 @@ system_call_fastpath:
- * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
- * very bad.
+@@ -238,7 +238,7 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+ * leak into the child task:
*/
-- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-+ GET_THREAD_INFO(%rcx)
-+ testl $_TIF_ALLWORK_MASK,TI_flags(%rcx)
- jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
-
- CFI_REMEMBER_STATE
-+ pax_exit_kernel_user
-+ pax_erase_kstack
+ if (use_eager_fpu())
+- memset(&dst_fpu->state.xsave, 0, xstate_size);
++ memset(&dst_fpu->state->xsave, 0, xstate_size);
- RESTORE_C_REGS_EXCEPT_RCX_R11
- movq RIP(%rsp),%rcx
-@@ -316,6 +728,9 @@ tracesys:
- call syscall_trace_enter_phase1
- test %rax, %rax
- jnz tracesys_phase2 /* if needed, run the slow path */
-+
-+ pax_erase_kstack
-+
- RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
- movq ORIG_RAX(%rsp), %rax
- jmp system_call_fastpath /* and return to the fast path */
-@@ -327,6 +742,8 @@ tracesys_phase2:
- movq %rax,%rdx
- call syscall_trace_enter_phase2
-
-+ pax_erase_kstack
-+
/*
- * Reload registers from stack in case ptrace changed them.
- * We don't reload %rax because syscall_trace_entry_phase2() returned
-@@ -364,6 +781,8 @@ GLOBAL(int_with_check)
- andl %edi,%edx
- jnz int_careful
- andl $~TS_COMPAT,TI_status(%rcx)
-+ pax_exit_kernel_user
-+ pax_erase_kstack
- jmp syscall_return
-
- /* Either reschedule or signal or syscall exit tracking needed. */
-@@ -485,7 +904,7 @@ opportunistic_sysret_failed:
- SWAPGS
- jmp restore_c_regs_and_iret
- CFI_ENDPROC
--END(system_call)
-+ENDPROC(system_call)
-
-
- .macro FORK_LIKE func
-@@ -495,7 +914,7 @@ ENTRY(stub_\func)
- SAVE_EXTRA_REGS 8
- jmp sys_\func
- CFI_ENDPROC
--END(stub_\func)
-+ENDPROC(stub_\func)
- .endm
-
- FORK_LIKE clone
-@@ -519,7 +938,7 @@ return_from_execve:
- movq %rax,RAX(%rsp)
- jmp int_ret_from_sys_call
- CFI_ENDPROC
--END(stub_execve)
-+ENDPROC(stub_execve)
- /*
- * Remaining execve stubs are only 7 bytes long.
- * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
-@@ -531,7 +950,7 @@ GLOBAL(stub_execveat)
- call sys_execveat
- jmp return_from_execve
- CFI_ENDPROC
--END(stub_execveat)
-+ENDPROC(stub_execveat)
-
- #ifdef CONFIG_X86_X32_ABI
- .align 8
-@@ -541,7 +960,7 @@ GLOBAL(stub_x32_execve)
- call compat_sys_execve
- jmp return_from_execve
- CFI_ENDPROC
--END(stub_x32_execve)
-+ENDPROC(stub_x32_execve)
- .align 8
- GLOBAL(stub_x32_execveat)
- CFI_STARTPROC
-@@ -549,7 +968,7 @@ GLOBAL(stub_x32_execveat)
- call compat_sys_execveat
- jmp return_from_execve
- CFI_ENDPROC
--END(stub_x32_execveat)
-+ENDPROC(stub_x32_execveat)
- #endif
+ * Save current FPU registers directly into the child
+@@ -285,7 +285,7 @@ void fpu__activate_curr(struct fpu *fpu)
+ WARN_ON_FPU(fpu != &current->thread.fpu);
+
+ if (!fpu->fpstate_active) {
+- fpstate_init(&fpu->state);
++ fpstate_init(fpu->state);
+
+ /* Safe to do for the current task: */
+ fpu->fpstate_active = 1;
+@@ -311,7 +311,7 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
+ fpu__save(fpu);
+ } else {
+ if (!fpu->fpstate_active) {
+- fpstate_init(&fpu->state);
++ fpstate_init(fpu->state);
+
+ /* Safe to do for current and for stopped child tasks: */
+ fpu->fpstate_active = 1;
+@@ -344,7 +344,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
+ /* Invalidate any lazy state: */
+ fpu->last_cpu = -1;
+ } else {
+- fpstate_init(&fpu->state);
++ fpstate_init(fpu->state);
+
+ /* Safe to do for stopped child tasks: */
+ fpu->fpstate_active = 1;
+@@ -368,7 +368,7 @@ void fpu__restore(struct fpu *fpu)
+ /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
+ kernel_fpu_disable();
+ fpregs_activate(fpu);
+- copy_kernel_to_fpregs(&fpu->state);
++ copy_kernel_to_fpregs(fpu->state);
+ fpu->counter++;
+ kernel_fpu_enable();
+ }
+@@ -442,25 +442,25 @@ void fpu__clear(struct fpu *fpu)
+ static inline unsigned short get_fpu_cwd(struct fpu *fpu)
+ {
+ if (cpu_has_fxsr) {
+- return fpu->state.fxsave.cwd;
++ return fpu->state->fxsave.cwd;
+ } else {
+- return (unsigned short)fpu->state.fsave.cwd;
++ return (unsigned short)fpu->state->fsave.cwd;
+ }
+ }
- #ifdef CONFIG_IA32_EMULATION
-@@ -592,7 +1011,7 @@ return_from_stub:
- movq %rax,RAX(%rsp)
- jmp int_ret_from_sys_call
- CFI_ENDPROC
--END(stub_rt_sigreturn)
-+ENDPROC(stub_rt_sigreturn)
+ static inline unsigned short get_fpu_swd(struct fpu *fpu)
+ {
+ if (cpu_has_fxsr) {
+- return fpu->state.fxsave.swd;
++ return fpu->state->fxsave.swd;
+ } else {
+- return (unsigned short)fpu->state.fsave.swd;
++ return (unsigned short)fpu->state->fsave.swd;
+ }
+ }
- #ifdef CONFIG_X86_X32_ABI
- ENTRY(stub_x32_rt_sigreturn)
-@@ -602,7 +1021,7 @@ ENTRY(stub_x32_rt_sigreturn)
- call sys32_x32_rt_sigreturn
- jmp return_from_stub
- CFI_ENDPROC
--END(stub_x32_rt_sigreturn)
-+ENDPROC(stub_x32_rt_sigreturn)
+ static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
+ {
+ if (cpu_has_xmm) {
+- return fpu->state.fxsave.mxcsr;
++ return fpu->state->fxsave.mxcsr;
+ } else {
+ return MXCSR_DEFAULT;
+ }
+diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
+index d14e9ac..8ca141b 100644
+--- a/arch/x86/kernel/fpu/init.c
++++ b/arch/x86/kernel/fpu/init.c
+@@ -42,7 +42,7 @@ static void fpu__init_cpu_generic(void)
+ /* Flush out any pending x87 state: */
+ #ifdef CONFIG_MATH_EMULATION
+ if (!cpu_has_fpu)
+- fpstate_init_soft(&current->thread.fpu.state.soft);
++ fpstate_init_soft(&current->thread.fpu.state->soft);
+ else
#endif
+ asm volatile ("fninit");
+@@ -147,12 +147,14 @@ EXPORT_SYMBOL_GPL(xstate_size);
+ #define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
+ BUILD_BUG_ON(sizeof(TYPE) != offsetofend(TYPE, MEMBER))
++union fpregs_state init_fpregs_state;
++
/*
-@@ -622,7 +1041,7 @@ ENTRY(ret_from_fork)
-
- RESTORE_EXTRA_REGS
-
-- testl $3,CS(%rsp) # from kernel_thread?
-+ testb $3,CS(%rsp) # from kernel_thread?
+ * We append the 'struct fpu' to the task_struct:
+ */
+ static void __init fpu__init_task_struct_size(void)
+ {
+- int task_size = sizeof(struct task_struct);
++ size_t task_size = sizeof(struct task_struct);
/*
- * By the time we get here, we have no idea whether our pt_regs,
-@@ -641,7 +1060,7 @@ ENTRY(ret_from_fork)
- RESTORE_EXTRA_REGS
- jmp int_ret_from_sys_call
- CFI_ENDPROC
--END(ret_from_fork)
-+ENDPROC(ret_from_fork)
+ * Subtract off the static size of the register state.
+@@ -168,16 +170,12 @@ static void __init fpu__init_task_struct_size(void)
- /*
- * Build the entry stubs with some assembler magic.
-@@ -659,7 +1078,7 @@ ENTRY(irq_entries_start)
- .align 8
- .endr
- CFI_ENDPROC
--END(irq_entries_start)
-+ENDPROC(irq_entries_start)
+ /*
+ * We dynamically size 'struct fpu', so we require that
+- * it be at the end of 'thread_struct' and that
+- * 'thread_struct' be at the end of 'task_struct'. If
++ * it be at the end of 'thread_struct'. If
+ * you hit a compile error here, check the structure to
+ * see if something got added to the end.
+ */
+ CHECK_MEMBER_AT_END_OF(struct fpu, state);
+ CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
+- CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
+-
+- arch_task_struct_size = task_size;
+ }
/*
- * Interrupt entry/exit.
-@@ -672,21 +1091,13 @@ END(irq_entries_start)
- /* 0(%rsp): ~(interrupt number) */
- .macro interrupt func
- cld
-- /*
-- * Since nothing in interrupt handling code touches r12...r15 members
-- * of "struct pt_regs", and since interrupts can nest, we can save
-- * four stack slots and simultaneously provide
-- * an unwind-friendly stack layout by saving "truncated" pt_regs
-- * exactly up to rbp slot, without these members.
-- */
-- ALLOC_PT_GPREGS_ON_STACK -RBP
-- SAVE_C_REGS -RBP
-- /* this goes to 0(%rsp) for unwinder, not for saving the value: */
-- SAVE_EXTRA_REGS_RBP -RBP
-+ ALLOC_PT_GPREGS_ON_STACK
-+ SAVE_C_REGS
-+ SAVE_EXTRA_REGS
-
-- leaq -RBP(%rsp),%rdi /* arg1 for \func (pointer to pt_regs) */
-+ movq %rsp,%rdi /* arg1 for \func (pointer to pt_regs) */
-
-- testl $3, CS-RBP(%rsp)
-+ testb $3, CS(%rsp)
- je 1f
- SWAPGS
- 1:
-@@ -709,8 +1120,20 @@ END(irq_entries_start)
- CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
- 0x77 /* DW_OP_breg7 (rsp) */, 0, \
- 0x06 /* DW_OP_deref */, \
-- 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
-+ 0x08 /* DW_OP_const1u */, SIZEOF_PTREGS, \
- 0x22 /* DW_OP_plus */
-+
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS(%rdi)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
-+
- /* We entered an interrupt context - irqs are off: */
- TRACE_IRQS_OFF
+diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
+index dc60810..6c8a1fa 100644
+--- a/arch/x86/kernel/fpu/regset.c
++++ b/arch/x86/kernel/fpu/regset.c
+@@ -37,7 +37,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
+ fpstate_sanitize_xstate(fpu);
-@@ -735,13 +1158,12 @@ ret_from_intr:
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+- &fpu->state.fxsave, 0, -1);
++ &fpu->state->fxsave, 0, -1);
+ }
- /* Restore saved previous stack */
- popq %rsi
-- CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
-- /* return code expects complete pt_regs - adjust rsp accordingly: */
-- leaq -RBP(%rsi),%rsp
-+ CFI_DEF_CFA rsi,SIZEOF_PTREGS /* reg/off reset after def_cfa_expr */
-+ movq %rsi, %rsp
- CFI_DEF_CFA_REGISTER rsp
-- CFI_ADJUST_CFA_OFFSET RBP
-+ CFI_ADJUST_CFA_OFFSET 0
+ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+@@ -54,19 +54,19 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+ fpstate_sanitize_xstate(fpu);
-- testl $3,CS(%rsp)
-+ testb $3,CS(%rsp)
- je retint_kernel
- /* Interrupt came from user space */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+- &fpu->state.fxsave, 0, -1);
++ &fpu->state->fxsave, 0, -1);
-@@ -763,6 +1185,8 @@ retint_swapgs: /* return to user-space */
- * The iretq could re-enable interrupts:
+ /*
+ * mxcsr reserved bits must be masked to zero for security reasons.
*/
- DISABLE_INTERRUPTS(CLBR_ANY)
-+ pax_exit_kernel_user
-+# pax_erase_kstack
- TRACE_IRQS_IRETQ
+- fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
++ fpu->state->fxsave.mxcsr &= mxcsr_feature_mask;
- SWAPGS
-@@ -781,6 +1205,21 @@ retint_kernel:
- jmp 0b
- 1:
- #endif
-+
-+ pax_exit_kernel
-+
-+#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
-+ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
-+ * namely calling EFI runtime services with a phys mapping. We're
-+ * starting off with NOPs and patch in the real instrumentation
-+ * (BTS/OR) before starting any userland process; even before starting
-+ * up the APs.
-+ */
-+ ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS
-+#else
-+ pax_force_retaddr RIP
-+#endif
-+
/*
- * The iretq could re-enable interrupts:
+ * update the header bits in the xsave header, indicating the
+ * presence of FP and SSE state.
*/
-@@ -822,15 +1261,15 @@ native_irq_return_ldt:
- SWAPGS
- movq PER_CPU_VAR(espfix_waddr),%rdi
- movq %rax,(0*8)(%rdi) /* RAX */
-- movq (2*8)(%rsp),%rax /* RIP */
-+ movq (2*8 + RIP-RIP)(%rsp),%rax /* RIP */
- movq %rax,(1*8)(%rdi)
-- movq (3*8)(%rsp),%rax /* CS */
-+ movq (2*8 + CS-RIP)(%rsp),%rax /* CS */
- movq %rax,(2*8)(%rdi)
-- movq (4*8)(%rsp),%rax /* RFLAGS */
-+ movq (2*8 + EFLAGS-RIP)(%rsp),%rax /* RFLAGS */
- movq %rax,(3*8)(%rdi)
-- movq (6*8)(%rsp),%rax /* SS */
-+ movq (2*8 + SS-RIP)(%rsp),%rax /* SS */
- movq %rax,(5*8)(%rdi)
-- movq (5*8)(%rsp),%rax /* RSP */
-+ movq (2*8 + RSP-RIP)(%rsp),%rax /* RSP */
- movq %rax,(4*8)(%rdi)
- andl $0xffff0000,%eax
- popq_cfi %rdi
-@@ -873,7 +1312,7 @@ retint_signal:
- jmp retint_with_reschedule
-
- CFI_ENDPROC
--END(common_interrupt)
-+ENDPROC(common_interrupt)
-
- /*
- * APIC interrupts.
-@@ -887,7 +1326,7 @@ ENTRY(\sym)
- interrupt \do_sym
- jmp ret_from_intr
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- #ifdef CONFIG_TRACING
-@@ -960,7 +1399,7 @@ apicinterrupt IRQ_WORK_VECTOR \
- /*
- * Exception entry points.
- */
--#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
-+#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
-
- .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
- ENTRY(\sym)
-@@ -1016,6 +1455,12 @@ ENTRY(\sym)
- .endif
-
- .if \shift_ist != -1
-+#ifdef CONFIG_SMP
-+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
-+ lea cpu_tss(%r13), %r13
-+#else
-+ lea cpu_tss(%rip), %r13
-+#endif
- subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
- .endif
-
-@@ -1063,7 +1508,7 @@ ENTRY(\sym)
- .endif
+ if (cpu_has_xsave)
+- fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
++ fpu->state->xsave.header.xfeatures |= XSTATE_FPSSE;
- CFI_ENDPROC
--END(\sym)
-+ENDPROC(\sym)
- .endm
-
- #ifdef CONFIG_TRACING
-@@ -1104,9 +1549,10 @@ gs_change:
- 2: mfence /* workaround */
- SWAPGS
- popfq_cfi
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(native_load_gs_index)
-+ENDPROC(native_load_gs_index)
-
- _ASM_EXTABLE(gs_change,bad_gs)
- .section .fixup,"ax"
-@@ -1134,9 +1580,10 @@ ENTRY(do_softirq_own_stack)
- CFI_DEF_CFA_REGISTER rsp
- CFI_ADJUST_CFA_OFFSET -8
- decl PER_CPU_VAR(irq_count)
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
--END(do_softirq_own_stack)
-+ENDPROC(do_softirq_own_stack)
-
- #ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-@@ -1177,7 +1624,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
- #endif
- jmp error_exit
- CFI_ENDPROC
--END(xen_do_hypervisor_callback)
-+ENDPROC(xen_do_hypervisor_callback)
-
- /*
- * Hypervisor uses this for application faults while it executes.
-@@ -1238,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
- SAVE_EXTRA_REGS
- jmp error_exit
- CFI_ENDPROC
--END(xen_failsafe_callback)
-+ENDPROC(xen_failsafe_callback)
-
- apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
- xen_hvm_callback_vector xen_evtchn_do_upcall
-@@ -1284,9 +1731,39 @@ ENTRY(paranoid_entry)
- js 1f /* negative -> in kernel */
- SWAPGS
- xorl %ebx,%ebx
--1: ret
-+1:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS+8(%rsp)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
-+ pax_force_retaddr
-+ ret
- CFI_ENDPROC
--END(paranoid_entry)
-+ENDPROC(paranoid_entry)
-+
-+ENTRY(paranoid_entry_nmi)
-+ XCPT_FRAME 1 15*8
-+ cld
-+ SAVE_C_REGS 8
-+ SAVE_EXTRA_REGS 8
-+ movl $1,%ebx
-+ movl $MSR_GS_BASE,%ecx
-+ rdmsr
-+ testl %edx,%edx
-+ js 1f /* negative -> in kernel */
-+ SWAPGS
-+ xorl %ebx,%ebx
-+1: pax_enter_kernel_nmi
-+ pax_force_retaddr
-+ ret
-+ CFI_ENDPROC
-+ENDPROC(paranoid_entry_nmi)
+ return ret;
+ }
+@@ -84,7 +84,7 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
- /*
- * "Paranoid" exit path from exception stack. This is invoked
-@@ -1303,20 +1780,27 @@ ENTRY(paranoid_exit)
- DEFAULT_FRAME
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF_DEBUG
-- testl %ebx,%ebx /* swapgs needed? */
-+ testl $1,%ebx /* swapgs needed? */
- jnz paranoid_exit_no_swapgs
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pax_exit_kernel_user
-+#else
-+ pax_exit_kernel
-+#endif
- TRACE_IRQS_IRETQ
- SWAPGS_UNSAFE_STACK
- jmp paranoid_exit_restore
- paranoid_exit_no_swapgs:
-+ pax_exit_kernel
- TRACE_IRQS_IRETQ_DEBUG
- paranoid_exit_restore:
- RESTORE_EXTRA_REGS
- RESTORE_C_REGS
- REMOVE_PT_GPREGS_FROM_STACK 8
-+ pax_force_retaddr_bts
- INTERRUPT_RETURN
- CFI_ENDPROC
--END(paranoid_exit)
-+ENDPROC(paranoid_exit)
+ fpu__activate_fpstate_read(fpu);
- /*
- * Save all registers in pt_regs, and switch gs if needed.
-@@ -1328,12 +1812,23 @@ ENTRY(error_entry)
- SAVE_C_REGS 8
- SAVE_EXTRA_REGS 8
- xorl %ebx,%ebx
-- testl $3,CS+8(%rsp)
-+ testb $3,CS+8(%rsp)
- je error_kernelspace
- error_swapgs:
- SWAPGS
- error_sti:
-+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ testb $3, CS+8(%rsp)
-+ jnz 1f
-+ pax_enter_kernel
-+ jmp 2f
-+1: pax_enter_kernel_user
-+2:
-+#else
-+ pax_enter_kernel
-+#endif
- TRACE_IRQS_OFF
-+ pax_force_retaddr
- ret
+- xsave = &fpu->state.xsave;
++ xsave = &fpu->state->xsave;
/*
-@@ -1368,7 +1863,7 @@ error_bad_iret:
- decl %ebx /* Return to usergs */
- jmp error_sti
- CFI_ENDPROC
--END(error_entry)
-+ENDPROC(error_entry)
+ * Copy the 48bytes defined by the software first into the xstate
+@@ -113,7 +113,7 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+ fpu__activate_fpstate_write(fpu);
- /* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
-@@ -1379,7 +1874,7 @@ ENTRY(error_exit)
- DISABLE_INTERRUPTS(CLBR_NONE)
- TRACE_IRQS_OFF
- GET_THREAD_INFO(%rcx)
-- testl %eax,%eax
-+ testl $1,%eax
- jne retint_kernel
- LOCKDEP_SYS_EXIT_IRQ
- movl TI_flags(%rcx),%edx
-@@ -1388,7 +1883,7 @@ ENTRY(error_exit)
- jnz retint_careful
- jmp retint_swapgs
- CFI_ENDPROC
--END(error_exit)
-+ENDPROC(error_exit)
-
- /* Runs on exception stack */
- ENTRY(nmi)
-@@ -1473,6 +1968,12 @@ ENTRY(nmi)
- pushq %r14 /* pt_regs->r14 */
- pushq %r15 /* pt_regs->r15 */
+- xsave = &fpu->state.xsave;
++ xsave = &fpu->state->xsave;
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ xorl %ebx,%ebx
-+#endif
-+
-+ pax_enter_kernel_nmi
-+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
/*
- * At this point we no longer need to worry about stack damage
- * due to nesting -- we're on the normal thread stack and we're
-@@ -1482,12 +1983,19 @@ ENTRY(nmi)
- movq $-1, %rsi
- call do_nmi
+@@ -204,7 +204,7 @@ static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
+ void
+ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+ {
+- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
++ struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave;
+ struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
+ struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
+ int i;
+@@ -242,7 +242,7 @@ void convert_to_fxsr(struct task_struct *tsk,
+ const struct user_i387_ia32_struct *env)
-+ pax_exit_kernel_nmi
-+
- /*
- * Return back to user mode. We must *not* do the normal exit
- * work, because we don't want to enable interrupts. Fortunately,
- * do_nmi doesn't modify pt_regs.
+ {
+- struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
++ struct fxregs_state *fxsave = &tsk->thread.fpu.state->fxsave;
+ struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
+ struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
+ int i;
+@@ -280,7 +280,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
+
+ if (!cpu_has_fxsr)
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+- &fpu->state.fsave, 0,
++ &fpu->state->fsave, 0,
+ -1);
+
+ fpstate_sanitize_xstate(fpu);
+@@ -311,7 +311,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+
+ if (!cpu_has_fxsr)
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+- &fpu->state.fsave, 0,
++ &fpu->state->fsave, 0,
+ -1);
+
+ if (pos > 0 || count < sizeof(env))
+@@ -326,7 +326,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+ * presence of FP.
*/
- SWAPGS
-+
-+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
-+ movq_cfi_restore RBX, rbx
-+#endif
-+
- jmp restore_c_regs_and_iret
+ if (cpu_has_xsave)
+- fpu->state.xsave.header.xfeatures |= XSTATE_FP;
++ fpu->state->xsave.header.xfeatures |= XSTATE_FP;
+ return ret;
+ }
- .Lnmi_from_kernel:
-@@ -1595,8 +2103,7 @@ nested_nmi:
- * Modify the "iret" frame to point to repeat_nmi, forcing another
- * iteration of NMI handling.
- */
-- leaq -1*8(%rsp), %rdx
-- movq %rdx, %rsp
-+ subq $8, %rsp
- CFI_ADJUST_CFA_OFFSET 1*8
- leaq -10*8(%rsp), %rdx
- pushq_cfi $__KERNEL_DS
-@@ -1614,6 +2121,7 @@ nested_nmi_out:
- CFI_RESTORE rdx
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index 50ec9af..bb871ca 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -54,7 +54,7 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
+ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
+ {
+ if (use_fxsr()) {
+- struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
++ struct xregs_state *xsave = &tsk->thread.fpu.state->xsave;
+ struct user_i387_ia32_struct env;
+ struct _fpstate_ia32 __user *fp = buf;
- /* We are returning to kernel mode, so this cannot result in a fault. */
-+# pax_force_retaddr_bts
- INTERRUPT_RETURN
+@@ -83,18 +83,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
- CFI_RESTORE_STATE
-@@ -1679,13 +2187,13 @@ end_repeat_nmi:
- ALLOC_PT_GPREGS_ON_STACK
+ /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+ sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
+- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
++ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
- /*
-- * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
-+ * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit
- * as we should not be calling schedule in NMI context.
- * Even with normal interrupts enabled. An NMI should not be
- * setting NEED_RESCHED or anything that normal interrupts and
- * exceptions might do.
- */
-- call paranoid_entry
-+ call paranoid_entry_nmi
- DEFAULT_FRAME 0
+ if (!use_xsave())
+ return err;
- /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
-@@ -1693,7 +2201,9 @@ end_repeat_nmi:
- movq $-1,%rsi
- call do_nmi
+- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
++ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
-- testl %ebx,%ebx /* swapgs needed? */
-+ pax_exit_kernel_nmi
-+
-+ testl $1,%ebx /* swapgs needed? */
- jnz nmi_restore
- nmi_swapgs:
- SWAPGS_UNSAFE_STACK
-@@ -1704,6 +2214,8 @@ nmi_restore:
- /* Point RSP at the "iret" frame. */
- REMOVE_PT_GPREGS_FROM_STACK 6*8
+ /*
+ * Read the xfeatures which we copied (directly from the cpu or
+ * from the state in task struct) to the user buffers.
+ */
+- err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
++ err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
-+ pax_force_retaddr_bts
-+
/*
- * Clear "NMI executing". Set DF first so that we can easily
- * distinguish the remaining code between here and IRET from
-@@ -1722,12 +2234,12 @@ nmi_restore:
+ * For legacy compatible, we always set FP/SSE bits in the bit
+@@ -109,7 +109,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
*/
- INTERRUPT_RETURN
- CFI_ENDPROC
--END(nmi)
-+ENDPROC(nmi)
+ xfeatures |= XSTATE_FPSSE;
- ENTRY(ignore_sysret)
- CFI_STARTPROC
- mov $-ENOSYS,%eax
- sysret
- CFI_ENDPROC
--END(ignore_sysret)
-+ENDPROC(ignore_sysret)
+- err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
++ err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures);
-diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
-index f5d0730..5bce89c 100644
---- a/arch/x86/kernel/espfix_64.c
-+++ b/arch/x86/kernel/espfix_64.c
-@@ -70,8 +70,7 @@ static DEFINE_MUTEX(espfix_init_mutex);
- #define ESPFIX_MAX_PAGES DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
- static void *espfix_pages[ESPFIX_MAX_PAGES];
+ return err;
+ }
+@@ -118,6 +118,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+ {
+ int err;
--static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
-- __aligned(PAGE_SIZE);
-+static pud_t espfix_pud_page[PTRS_PER_PUD] __page_aligned_rodata;
++ buf = (struct xregs_state __user *)____m(buf);
+ if (use_xsave())
+ err = copy_xregs_to_user(buf);
+ else if (use_fxsr())
+@@ -152,7 +153,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+ */
+ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+ {
+- struct xregs_state *xsave = &current->thread.fpu.state.xsave;
++ struct xregs_state *xsave = &current->thread.fpu.state->xsave;
+ struct task_struct *tsk = current;
+ int ia32_fxstate = (buf != buf_fx);
- static unsigned int page_random, slot_random;
+@@ -195,7 +196,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+ struct user_i387_ia32_struct *ia32_env,
+ u64 xfeatures, int fx_only)
+ {
+- struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
++ struct xregs_state *xsave = &tsk->thread.fpu.state->xsave;
+ struct xstate_header *header = &xsave->header;
-@@ -122,11 +121,17 @@ static void init_espfix_random(void)
- void __init init_espfix_bsp(void)
+ if (use_xsave()) {
+@@ -228,6 +229,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
+ */
+ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
- pgd_t *pgd_p;
-+ unsigned long index = pgd_index(ESPFIX_BASE_ADDR);
++ buf = (void __user *)____m(buf);
+ if (use_xsave()) {
+ if ((unsigned long)buf % 64 || fx_only) {
+ u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
+@@ -308,9 +310,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ */
+ fpu__drop(fpu);
+
+- if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
++ if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
+ __copy_from_user(&env, buf, sizeof(env))) {
+- fpstate_init(&fpu->state);
++ fpstate_init(fpu->state);
+ err = -1;
+ } else {
+ sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 62fc001..5ce38be 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -93,14 +93,14 @@ EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
+ */
+ void fpstate_sanitize_xstate(struct fpu *fpu)
+ {
+- struct fxregs_state *fx = &fpu->state.fxsave;
++ struct fxregs_state *fx = &fpu->state->fxsave;
+ int feature_bit;
+ u64 xfeatures;
- /* Install the espfix pud into the kernel page directory */
-- pgd_p = &init_level4_pgt[pgd_index(ESPFIX_BASE_ADDR)];
-+ pgd_p = &init_level4_pgt[index];
- pgd_populate(&init_mm, pgd_p, (pud_t *)espfix_pud_page);
+ if (!use_xsaveopt())
+ return;
-+#ifdef CONFIG_PAX_PER_CPU_PGD
-+ clone_pgd_range(get_cpu_pgd(0, kernel) + index, swapper_pg_dir + index, 1);
-+ clone_pgd_range(get_cpu_pgd(0, user) + index, swapper_pg_dir + index, 1);
-+#endif
-+
- /* Randomize the locations */
- init_espfix_random();
+- xfeatures = fpu->state.xsave.header.xfeatures;
++ xfeatures = fpu->state->xsave.header.xfeatures;
-@@ -194,7 +199,7 @@ void init_espfix_ap(void)
- set_pte(&pte_p[n*PTE_STRIDE], pte);
+ /*
+ * None of the feature bits are in init state. So nothing else
+@@ -402,7 +402,7 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
+ if (!boot_cpu_has(X86_FEATURE_XSAVE))
+ return NULL;
- /* Job is done for this CPU and any CPU which shares this page */
-- ACCESS_ONCE(espfix_pages[page]) = stack_page;
-+ ACCESS_ONCE_RW(espfix_pages[page]) = stack_page;
+- xsave = &current->thread.fpu.state.xsave;
++ xsave = &current->thread.fpu.state->xsave;
+ /*
+ * We should not ever be requesting features that we
+ * have not enabled. Remember that pcntxt_mask is
+@@ -457,5 +457,5 @@ const void *get_xsave_field_ptr(int xsave_state)
+ */
+ fpu__save(fpu);
- unlock_done:
- mutex_unlock(&espfix_init_mutex);
+- return get_xsave_addr(&fpu->state.xsave, xsave_state);
++ return get_xsave_addr(&fpu->state->xsave, xsave_state);
+ }
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 8b7b0a5..02219db 100644
--- a/arch/x86/kernel/ftrace.c
@@ -24404,7 +25524,7 @@ index f129a9a..af8f6da 100644
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index 7e429c9..7244a52 100644
+index 0e2d96f..5889003 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -27,6 +27,12 @@
@@ -24460,7 +25580,7 @@ index 7e429c9..7244a52 100644
ENTRY(startup_32)
movl pa(stack_start),%ecx
-@@ -114,6 +129,59 @@ ENTRY(startup_32)
+@@ -114,6 +129,66 @@ ENTRY(startup_32)
2:
leal -__PAGE_OFFSET(%ecx),%esp
@@ -24473,7 +25593,14 @@ index 7e429c9..7244a52 100644
+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
+ movl $__per_cpu_end - 1,%eax
+ subl $__per_cpu_start,%eax
++ cmpl $0x100000,%eax
++ jb 1f
++ shrl $PAGE_SHIFT,%eax
++ orb $0x80,GDT_ENTRY_PERCPU * 8 + 6(%edi)
++1:
+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
++ shrl $16,%eax
++ orb %al,GDT_ENTRY_PERCPU * 8 + 6(%edi)
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
@@ -24520,7 +25647,7 @@ index 7e429c9..7244a52 100644
/*
* Clear BSS first so that there are no surprises...
*/
-@@ -209,8 +277,11 @@ ENTRY(startup_32)
+@@ -209,8 +284,11 @@ ENTRY(startup_32)
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
@@ -24534,7 +25661,7 @@ index 7e429c9..7244a52 100644
#else /* Not PAE */
page_pde_offset = (__PAGE_OFFSET >> 20);
-@@ -240,8 +311,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -240,8 +318,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
movl %eax, pa(max_pfn_mapped)
/* Do early initialization of the fixmap area */
@@ -24548,7 +25675,7 @@ index 7e429c9..7244a52 100644
#endif
#ifdef CONFIG_PARAVIRT
-@@ -255,9 +329,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
+@@ -255,9 +336,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
cmpl $num_subarch_entries, %eax
jae bad_subarch
@@ -24559,7 +25686,7 @@ index 7e429c9..7244a52 100644
bad_subarch:
WEAK(lguest_entry)
-@@ -269,10 +341,10 @@ WEAK(xen_entry)
+@@ -269,10 +348,10 @@ WEAK(xen_entry)
__INITDATA
subarch_entries:
@@ -24574,7 +25701,7 @@ index 7e429c9..7244a52 100644
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
-@@ -362,6 +434,7 @@ default_entry:
+@@ -362,6 +441,7 @@ default_entry:
movl pa(mmu_cr4_features),%eax
movl %eax,%cr4
@@ -24582,7 +25709,7 @@ index 7e429c9..7244a52 100644
testb $X86_CR4_PAE, %al # check if PAE is enabled
jz enable_paging
-@@ -390,6 +463,9 @@ default_entry:
+@@ -390,6 +470,9 @@ default_entry:
/* Make changes effective */
wrmsr
@@ -24592,7 +25719,7 @@ index 7e429c9..7244a52 100644
enable_paging:
/*
-@@ -457,14 +533,20 @@ is486:
+@@ -457,14 +540,20 @@ is486:
1: movl $(__KERNEL_DS),%eax # reload all the segment registers
movl %eax,%ss # after changing gdt.
@@ -24614,7 +25741,7 @@ index 7e429c9..7244a52 100644
movl %eax,%gs
xorl %eax,%eax # Clear LDT
-@@ -521,8 +603,11 @@ setup_once:
+@@ -521,8 +610,11 @@ setup_once:
* relocation. Manually set base address in stack canary
* segment descriptor.
*/
@@ -24627,16 +25754,16 @@ index 7e429c9..7244a52 100644
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
-@@ -559,7 +644,7 @@ early_idt_handler_common:
+@@ -559,7 +651,7 @@ early_idt_handler_common:
cmpl $2,(%esp) # X86_TRAP_NMI
- je is_nmi # Ignore NMI
+ je .Lis_nmi # Ignore NMI
- cmpl $2,%ss:early_recursion_flag
+ cmpl $1,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
-@@ -597,8 +682,8 @@ early_idt_handler_common:
+@@ -597,8 +689,8 @@ early_idt_handler_common:
pushl (20+6*4)(%esp) /* trapno */
pushl $fault_msg
call printk
@@ -24646,7 +25773,7 @@ index 7e429c9..7244a52 100644
hlt_loop:
hlt
jmp hlt_loop
-@@ -618,8 +703,11 @@ ENDPROC(early_idt_handler_common)
+@@ -618,8 +710,11 @@ ENDPROC(early_idt_handler_common)
/* This is the default interrupt "handler" :-) */
ALIGN
ignore_int:
@@ -24659,7 +25786,7 @@ index 7e429c9..7244a52 100644
pushl %eax
pushl %ecx
pushl %edx
-@@ -628,9 +716,6 @@ ignore_int:
+@@ -628,9 +723,6 @@ ignore_int:
movl $(__KERNEL_DS),%eax
movl %eax,%ds
movl %eax,%es
@@ -24669,7 +25796,7 @@ index 7e429c9..7244a52 100644
pushl 16(%esp)
pushl 24(%esp)
pushl 32(%esp)
-@@ -664,29 +749,34 @@ ENTRY(setup_once_ref)
+@@ -664,29 +756,34 @@ ENTRY(setup_once_ref)
/*
* BSS section
*/
@@ -24709,7 +25836,7 @@ index 7e429c9..7244a52 100644
ENTRY(initial_page_table)
.long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
# if KPMDS == 3
-@@ -705,12 +795,20 @@ ENTRY(initial_page_table)
+@@ -705,12 +802,20 @@ ENTRY(initial_page_table)
# error "Kernel PMDs should be 1, 2 or 3"
# endif
.align PAGE_SIZE /* needs to be page-sized too */
@@ -24731,7 +25858,7 @@ index 7e429c9..7244a52 100644
__INITRODATA
int_msg:
-@@ -738,7 +836,7 @@ fault_msg:
+@@ -738,7 +843,7 @@ fault_msg:
* segment size, and 32-bit linear address value:
*/
@@ -24740,7 +25867,7 @@ index 7e429c9..7244a52 100644
.globl boot_gdt_descr
.globl idt_descr
-@@ -747,7 +845,7 @@ fault_msg:
+@@ -747,7 +852,7 @@ fault_msg:
.word 0 # 32 bit align gdt_desc.address
boot_gdt_descr:
.word __BOOT_DS+7
@@ -24749,7 +25876,7 @@ index 7e429c9..7244a52 100644
.word 0 # 32-bit align idt_desc.address
idt_descr:
-@@ -758,7 +856,7 @@ idt_descr:
+@@ -758,7 +863,7 @@ idt_descr:
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
@@ -24758,7 +25885,7 @@ index 7e429c9..7244a52 100644
/*
* The boot_gdt must mirror the equivalent in setup.S and is
-@@ -767,5 +865,65 @@ ENTRY(early_gdt_descr)
+@@ -767,5 +872,65 @@ ENTRY(early_gdt_descr)
.align L1_CACHE_BYTES
ENTRY(boot_gdt)
.fill GDT_ENTRY_BOOT_CS,8,0
@@ -24827,7 +25954,7 @@ index 7e429c9..7244a52 100644
+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
+ .endr
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
-index 7e5da2c..761adf1 100644
+index 1d40ca8..4d38dbd 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -20,6 +20,8 @@
@@ -24852,7 +25979,7 @@ index 7e5da2c..761adf1 100644
.text
__HEAD
-@@ -89,11 +97,26 @@ startup_64:
+@@ -89,11 +97,33 @@ startup_64:
* Fixup the physical addresses in the page table
*/
addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
@@ -24874,6 +26001,13 @@ index 7e5da2c..761adf1 100644
+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
+
++ addq %rbp, level2_ident_pgt + (0*8)(%rip)
++
++ addq %rbp, level2_fixmap_pgt + (0*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (1*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (2*8)(%rip)
++ addq %rbp, level2_fixmap_pgt + (3*8)(%rip)
++
+ addq %rbp, level2_fixmap_pgt + (504*8)(%rip)
+ addq %rbp, level2_fixmap_pgt + (505*8)(%rip)
addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
@@ -24881,7 +26015,7 @@ index 7e5da2c..761adf1 100644
/*
* Set up the identity mapping for the switchover. These
-@@ -174,11 +197,12 @@ ENTRY(secondary_startup_64)
+@@ -174,11 +204,12 @@ ENTRY(secondary_startup_64)
* after the boot processor executes this code.
*/
@@ -24896,7 +26030,7 @@ index 7e5da2c..761adf1 100644
movq %rcx, %cr4
/* Setup early boot stage 4 level pagetables. */
-@@ -199,10 +223,21 @@ ENTRY(secondary_startup_64)
+@@ -199,10 +230,21 @@ ENTRY(secondary_startup_64)
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_SCE, %eax /* Enable System Call */
@@ -24919,7 +26053,7 @@ index 7e5da2c..761adf1 100644
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-@@ -282,6 +317,7 @@ ENTRY(secondary_startup_64)
+@@ -282,6 +324,7 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
@@ -24927,7 +26061,7 @@ index 7e5da2c..761adf1 100644
movq initial_code(%rip),%rax
pushq $0 # fake return address to stop unwinder
pushq $__KERNEL_CS # set correct cs
-@@ -313,7 +349,7 @@ ENDPROC(start_cpu0)
+@@ -313,7 +356,7 @@ ENDPROC(start_cpu0)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(stack_start)
@@ -24936,7 +26070,7 @@ index 7e5da2c..761adf1 100644
.word 0
__FINITDATA
-@@ -393,7 +429,7 @@ early_idt_handler_common:
+@@ -393,7 +436,7 @@ early_idt_handler_common:
call dump_stack
#ifdef CONFIG_KALLSYMS
leaq early_idt_ripmsg(%rip),%rdi
@@ -24945,7 +26079,7 @@ index 7e5da2c..761adf1 100644
call __print_symbol
#endif
#endif /* EARLY_PRINTK */
-@@ -422,6 +458,7 @@ ENDPROC(early_idt_handler_common)
+@@ -422,6 +465,7 @@ ENDPROC(early_idt_handler_common)
early_recursion_flag:
.long 0
@@ -24953,7 +26087,13 @@ index 7e5da2c..761adf1 100644
#ifdef CONFIG_EARLY_PRINTK
early_idt_msg:
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
-@@ -449,29 +486,52 @@ NEXT_PAGE(early_level4_pgt)
+@@ -444,40 +488,67 @@ GLOBAL(name)
+ __INITDATA
+ NEXT_PAGE(early_level4_pgt)
+ .fill 511,8,0
+- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
++ .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
+
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -24976,15 +26116,16 @@ index 7e5da2c..761adf1 100644
+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
.org init_level4_pgt + L4_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
-
+- .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
++ .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
+#ifdef CONFIG_PAX_PER_CPU_PGD
+NEXT_PAGE(cpu_pgd)
+ .rept 2*NR_CPUS
+ .fill 512,8,0
+ .endr
+#endif
-+
+
NEXT_PAGE(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+#ifdef CONFIG_XEN
@@ -25006,26 +26147,27 @@ index 7e5da2c..761adf1 100644
+
NEXT_PAGE(level2_ident_pgt)
- /* Since I easily can, map the first 1G.
++ .quad level1_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ /* Since I easily can, map the first 2G.
* Don't set NX because code runs from these pages.
*/
- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
-#endif
-+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
++ PMDS(PMD_SIZE, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD - 1)
NEXT_PAGE(level3_kernel_pgt)
.fill L3_START_KERNEL,8,0
-@@ -479,6 +539,9 @@ NEXT_PAGE(level3_kernel_pgt)
+ /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
- .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
-
+- .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
++ .quad level2_fixmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
++
+NEXT_PAGE(level2_vmemmap_pgt)
+ .fill 512,8,0
-+
+
NEXT_PAGE(level2_kernel_pgt)
/*
- * 512 MB kernel mapping. We spend a full page on this pagetable
-@@ -494,31 +557,69 @@ NEXT_PAGE(level2_kernel_pgt)
+@@ -494,31 +565,79 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
@@ -25033,13 +26175,23 @@ index 7e5da2c..761adf1 100644
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
-+ .fill 504,8,0
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _PAGE_TABLE
-+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
++ .quad level1_modules_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_modules_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_modules_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_modules_pgt - __START_KERNEL_map + 3 * PAGE_SIZE + _KERNPG_TABLE
++ .fill 500,8,0
++ .quad level1_fixmap_pgt - __START_KERNEL_map + 0 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_fixmap_pgt - __START_KERNEL_map + 1 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_fixmap_pgt - __START_KERNEL_map + 2 * PAGE_SIZE + _KERNPG_TABLE
++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _KERNPG_TABLE
+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
+ .fill 4,8,0
++
++NEXT_PAGE(level1_ident_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level1_modules_pgt)
++ .fill 4*512,8,0
NEXT_PAGE(level1_fixmap_pgt)
+ .fill 3*512,8,0
@@ -25104,7 +26256,7 @@ index 7e5da2c..761adf1 100644
.skip PAGE_SIZE
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
-index 05fd74f..c3548b1 100644
+index 64341aa..b1e6632 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
@@ -25120,9 +26272,9 @@ index 05fd74f..c3548b1 100644
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
-@@ -44,3 +48,11 @@ EXPORT_SYMBOL(___preempt_schedule);
- EXPORT_SYMBOL(___preempt_schedule_context);
- #endif
+@@ -42,3 +46,11 @@ EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(___preempt_schedule);
+ EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
+
+#ifdef CONFIG_PAX_KERNEXEC
@@ -25133,7 +26285,7 @@ index 05fd74f..c3548b1 100644
+EXPORT_SYMBOL(cpu_pgd);
+#endif
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
-index e7cc537..67d7372 100644
+index 16cb827..372334f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
@@ -25234,19 +26386,19 @@ index 37dae79..620dd84 100644
regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
t->iopl = level << 12;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
-index e5952c2..11c3a54 100644
+index c7dfe1b..146f63c 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
-@@ -22,7 +22,7 @@
- #define CREATE_TRACE_POINTS
- #include <asm/trace/irq_vectors.h>
+@@ -28,7 +28,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
+ DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+ EXPORT_PER_CPU_SYMBOL(irq_regs);
-atomic_t irq_err_count;
+atomic_unchecked_t irq_err_count;
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
-@@ -132,9 +132,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+@@ -144,9 +144,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
seq_puts(p, " Hypervisor callback interrupts\n");
#endif
@@ -25256,9 +26408,9 @@ index e5952c2..11c3a54 100644
- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
#endif
- return 0;
- }
-@@ -174,7 +174,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
+ #ifdef CONFIG_HAVE_KVM
+ seq_printf(p, "%*s: ", prec, "PIN");
+@@ -198,7 +198,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
u64 arch_irq_stat(void)
{
@@ -25268,10 +26420,10 @@ index e5952c2..11c3a54 100644
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
-index f9fd86a..e6cc9ae 100644
+index cd74f59..588af0b 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
-@@ -29,6 +29,8 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);
+@@ -23,6 +23,8 @@
#ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -25280,7 +26432,7 @@ index f9fd86a..e6cc9ae 100644
int sysctl_panic_on_stackoverflow __read_mostly;
/* Debugging check for stack overflow: is there less than 1KB free? */
-@@ -39,13 +41,14 @@ static int check_stack_overflow(void)
+@@ -33,13 +35,14 @@ static int check_stack_overflow(void)
__asm__ __volatile__("andl %%esp,%0" :
"=r" (sp) : "0" (THREAD_SIZE - 1));
@@ -25296,7 +26448,7 @@ index f9fd86a..e6cc9ae 100644
if (sysctl_panic_on_stackoverflow)
panic("low stack detected by irq handler - check messages\n");
}
-@@ -77,10 +80,9 @@ static inline void *current_stack(void)
+@@ -71,10 +74,9 @@ static inline void *current_stack(void)
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
@@ -25308,7 +26460,7 @@ index f9fd86a..e6cc9ae 100644
irqstk = __this_cpu_read(hardirq_stack);
/*
-@@ -89,15 +91,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -83,15 +85,19 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
* handler) we can't do that and just have to keep using the
* current stack (which is the irq stack already after all)
*/
@@ -25330,7 +26482,7 @@ index f9fd86a..e6cc9ae 100644
if (unlikely(overflow))
call_on_stack(print_stack_overflow, isp);
-@@ -108,6 +114,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -102,6 +108,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
: "0" (irq), "1" (desc), "2" (isp),
"D" (desc->handle_irq)
: "memory", "cc", "ecx");
@@ -25342,7 +26494,7 @@ index f9fd86a..e6cc9ae 100644
return 1;
}
-@@ -116,32 +127,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+@@ -110,32 +121,18 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
*/
void irq_ctx_init(int cpu)
{
@@ -25377,7 +26529,7 @@ index f9fd86a..e6cc9ae 100644
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
-@@ -151,7 +148,16 @@ void do_softirq_own_stack(void)
+@@ -145,7 +142,16 @@ void do_softirq_own_stack(void)
prev_esp = (u32 *)irqstk;
*prev_esp = current_stack_pointer();
@@ -25395,19 +26547,19 @@ index f9fd86a..e6cc9ae 100644
bool handle_irq(unsigned irq, struct pt_regs *regs)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
-index 394e643..824fce8 100644
+index bc4604e..0be227d 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
-@@ -26,6 +26,8 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
- DEFINE_PER_CPU(struct pt_regs *, irq_regs);
- EXPORT_PER_CPU_SYMBOL(irq_regs);
+@@ -20,6 +20,8 @@
+ #include <asm/idle.h>
+ #include <asm/apic.h>
+extern void gr_handle_kernel_exploit(void);
+
int sysctl_panic_on_stackoverflow;
/*
-@@ -69,6 +71,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
+@@ -63,6 +65,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
irq_stack_top, irq_stack_bottom,
estack_top, estack_bottom);
@@ -25417,14 +26569,14 @@ index 394e643..824fce8 100644
panic("low stack detected by irq handler - check messages\n");
#endif
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
-index 26d5a55..bf8b49b 100644
+index 26d5a55..063fef8 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -31,6 +31,8 @@ static void bug_at(unsigned char *ip, int line)
* Something went wrong. Crash the box, as something could be
* corrupting the kernel.
*/
-+ ip = ktla_ktva(ip);
++ ip = (unsigned char *)ktla_ktva((unsigned long)ip);
+ pr_warning("Unexpected op at %pS [%p] %s:%d\n", ip, ip, __FILE__, line);
pr_warning("Unexpected op at %pS [%p] (%02x %02x %02x %02x %02x) %s:%d\n",
ip, ip, ip[0], ip[1], ip[2], ip[3], ip[4], __FILE__, line);
@@ -25464,7 +26616,7 @@ index 26d5a55..bf8b49b 100644
}
memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
-index d6178d9..e12482f 100644
+index d6178d9..598681f 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -228,7 +228,10 @@ static void kgdb_correct_hw_break(void)
@@ -25508,12 +26660,12 @@ index d6178d9..e12482f 100644
bpt->type = BP_BREAKPOINT;
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
-+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
++ err = probe_kernel_read(bpt->saved_instr, (const void *)ktla_ktva(bpt->bpt_addr),
BREAK_INSTR_SIZE);
if (err)
return err;
- err = probe_kernel_write((char *)bpt->bpt_addr,
-+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
++ err = probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr),
arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
if (!err)
@@ -25538,12 +26690,12 @@ index d6178d9..e12482f 100644
knl_write:
#endif /* CONFIG_DEBUG_RODATA */
- return probe_kernel_write((char *)bpt->bpt_addr,
-+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
++ return probe_kernel_write((void *)ktla_ktva(bpt->bpt_addr),
(char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
-index 1deffe6..4705700 100644
+index 1deffe6..3be342a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -120,9 +120,12 @@ __synthesize_relative_insn(void *from, void *to, u8 op)
@@ -25551,7 +26703,7 @@ index 1deffe6..4705700 100644
} __packed *insn;
- insn = (struct __arch_relative_insn *)from;
-+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
++ insn = (struct __arch_relative_insn *)ktla_ktva((unsigned long)from);
+
+ pax_open_kernel();
insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -25736,7 +26888,7 @@ index c2bedae..25e7ab60 100644
.name = "data",
.mode = S_IRUGO,
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
-index c37886d..f43b63d 100644
+index 2bcc052..864eb84 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -11,6 +11,7 @@
@@ -25745,9 +26897,9 @@ index c37886d..f43b63d 100644
#include <linux/mm.h>
+#include <linux/ratelimit.h>
#include <linux/smp.h>
+ #include <linux/slab.h>
#include <linux/vmalloc.h>
- #include <linux/uaccess.h>
-@@ -20,6 +21,14 @@
+@@ -21,6 +22,14 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
@@ -25759,39 +26911,13 @@ index c37886d..f43b63d 100644
+int sysctl_modify_ldt __read_only = 0;
+#endif
+
- #ifdef CONFIG_SMP
+ /* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
-@@ -66,13 +75,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
- if (reload) {
- #ifdef CONFIG_SMP
- preempt_disable();
-- load_LDT(pc);
-+ load_LDT_nolock(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
- #else
-- load_LDT(pc);
-+ load_LDT_nolock(pc);
- #endif
- }
- if (oldsize) {
-@@ -94,7 +103,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
- return err;
-
- for (i = 0; i < old->size; i++)
-- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
-+ write_ldt_entry(new->ldt, i, old->ldt + i);
- return 0;
- }
+@@ -109,6 +118,23 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ struct mm_struct *old_mm;
+ int retval = 0;
-@@ -115,6 +124,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
- }
-+
+ if (tsk == current) {
+ mm->context.vdso = 0;
+
@@ -25809,24 +26935,25 @@ index c37886d..f43b63d 100644
+
+ }
+
- return retval;
- }
-
-@@ -229,6 +256,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
- }
- }
-
+ mutex_init(&mm->context.lock);
+ old_mm = current->mm;
+ if (!old_mm) {
+@@ -235,6 +261,14 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
++
+#ifdef CONFIG_PAX_SEGMEXEC
-+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
-+ error = -EINVAL;
-+ goto out_unlock;
-+ }
++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
++ error = -EINVAL;
++ goto out;
++ }
+#endif
+
- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
- goto out_unlock;
-@@ -254,6 +288,15 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
+@@ -276,6 +310,15 @@ asmlinkage int sys_modify_ldt(int func, void __user *ptr,
{
int ret = -ENOSYS;
@@ -25992,7 +27119,7 @@ index 94ea120..4154cea 100644
+ENDPROC(return_to_handler)
#endif
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
-index 005c03e..2f440cd 100644
+index 005c03e..7000fe4 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -75,17 +75,17 @@ static unsigned long int get_module_load_offset(void)
@@ -26085,7 +27212,7 @@ index 005c03e..2f440cd 100644
+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+ location = (uint32_t)plocation;
+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
-+ plocation = ktla_ktva((void *)plocation);
++ plocation = (uint32_t *)ktla_ktva((unsigned long)plocation);
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
@@ -26302,20 +27429,20 @@ index 6d9582e..f746287 100644
return;
}
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
-index bbb6c73..24a58ef 100644
+index 33ee3e0..da3519a 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
-@@ -8,7 +8,7 @@
-
- #include <asm/paravirt.h>
+@@ -23,7 +23,7 @@ bool pv_is_native_spin_unlock(void)
+ }
+ #endif
-struct pv_lock_ops pv_lock_ops = {
+struct pv_lock_ops pv_lock_ops __read_only = {
#ifdef CONFIG_SMP
- .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
- .unlock_kick = paravirt_nop,
+ #ifdef CONFIG_QUEUED_SPINLOCKS
+ .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
-index c614dd4..9ad659e 100644
+index 58bcfb6..0adb7d7 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -56,6 +56,9 @@ u64 _paravirt_ident_64(u64 x)
@@ -26334,7 +27461,7 @@ index c614dd4..9ad659e 100644
/* If there's no function, patch it with a ud2a (BUG) */
- ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
- else if (opfunc == _paravirt_nop)
-+ ret = paravirt_patch_insns(insnbuf, len, ktva_ktla(ud2a), ud2a+sizeof(ud2a));
++ ret = paravirt_patch_insns(insnbuf, len, (const char *)ktva_ktla((unsigned long)ud2a), ud2a+sizeof(ud2a));
+ else if (opfunc == (void *)_paravirt_nop)
/* If the operation is a nop, then nop the callsite */
ret = paravirt_patch_nop();
@@ -26352,17 +27479,17 @@ index c614dd4..9ad659e 100644
+#endif
else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
- type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-@@ -176,7 +183,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
+ #ifdef CONFIG_X86_32
+@@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
if (insn_len > len || start == NULL)
insn_len = len;
else
- memcpy(insnbuf, start, insn_len);
-+ memcpy(insnbuf, ktla_ktva(start), insn_len);
++ memcpy(insnbuf, (const char *)ktla_ktva((unsigned long)start), insn_len);
return insn_len;
}
-@@ -300,7 +307,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
+@@ -302,7 +309,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
@@ -26371,7 +27498,7 @@ index c614dd4..9ad659e 100644
.name = "bare hardware",
.paravirt_enabled = 0,
.kernel_rpl = 0,
-@@ -311,16 +318,16 @@ struct pv_info pv_info = {
+@@ -313,16 +320,16 @@ struct pv_info pv_info = {
#endif
};
@@ -26391,7 +27518,7 @@ index c614dd4..9ad659e 100644
.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
-@@ -332,7 +339,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
+@@ -334,7 +341,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
#endif
};
@@ -26400,7 +27527,7 @@ index c614dd4..9ad659e 100644
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
-@@ -395,21 +402,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
+@@ -397,21 +404,26 @@ NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
@@ -26430,7 +27557,7 @@ index c614dd4..9ad659e 100644
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
-@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
.make_pud = PTE_IDENT,
.set_pgd = native_set_pgd,
@@ -26438,7 +27565,7 @@ index c614dd4..9ad659e 100644
#endif
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-@@ -479,6 +492,12 @@ struct pv_mmu_ops pv_mmu_ops = {
+@@ -481,6 +494,12 @@ struct pv_mmu_ops pv_mmu_ops = {
},
.set_fixmap = native_set_fixmap,
@@ -26452,7 +27579,7 @@ index c614dd4..9ad659e 100644
EXPORT_SYMBOL_GPL(pv_time_ops);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
-index a1da673..b6f5831 100644
+index 8aa0558..465512e 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -9,7 +9,11 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
@@ -26467,7 +27594,7 @@ index a1da673..b6f5831 100644
DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
-@@ -57,7 +61,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+@@ -62,7 +66,11 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_mmu_ops, read_cr3);
PATCH_SITE(pv_mmu_ops, write_cr3);
PATCH_SITE(pv_cpu_ops, clts);
@@ -26477,8 +27604,8 @@ index a1da673..b6f5831 100644
+#endif
+
PATCH_SITE(pv_cpu_ops, wbinvd);
-
- patch_site:
+ #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+ case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0497f71..7186c0d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
@@ -26506,10 +27633,10 @@ index 35ccf75..7a15747 100644
#define DEBUG 1
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
-index 77dd0ad..9ec4723 100644
+index adf0392..88a7576 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
-@@ -33,7 +33,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+@@ -40,7 +40,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size,
struct dma_attrs *attrs)
{
if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
@@ -26519,10 +27646,18 @@ index 77dd0ad..9ec4723 100644
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
-index 9717437..44bc9aa 100644
+index c27cad7..47e3f47 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
-@@ -38,7 +38,8 @@
+@@ -15,6 +15,7 @@
+ #include <linux/dmi.h>
+ #include <linux/utsname.h>
+ #include <linux/stackprotector.h>
++#include <linux/kthread.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <trace/events/power.h>
+@@ -37,7 +38,8 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
@@ -26532,7 +27667,7 @@ index 9717437..44bc9aa 100644
.x86_tss = {
.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
-@@ -56,6 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+@@ -55,6 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
*/
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
@@ -26540,17 +27675,45 @@ index 9717437..44bc9aa 100644
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-@@ -115,7 +117,7 @@ void arch_task_cache_init(void)
- task_xstate_cachep =
- kmem_cache_create("task_xstate", xstate_size,
- __alignof__(union thread_xstate),
-- SLAB_PANIC | SLAB_NOTRACK, NULL);
-+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
- setup_xstate_comp();
+@@ -75,17 +78,35 @@ void idle_notifier_unregister(struct notifier_block *n)
+ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+ #endif
+
++struct kmem_cache *fpregs_state_cachep;
++EXPORT_SYMBOL(fpregs_state_cachep);
++
++void __init arch_task_cache_init(void)
++{
++ /* create a slab on which task_structs can be allocated */
++ fpregs_state_cachep =
++ kmem_cache_create("fpregs_state", xstate_size,
++ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
++}
++
+ /*
+ * this gets called so that we can store lazy state into memory and copy the
+ * current task into the new thread.
+ */
+ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+ {
+- memcpy(dst, src, arch_task_struct_size);
++ *dst = *src;
+
++ dst->thread.fpu.state = kmem_cache_alloc_node(fpregs_state_cachep, GFP_KERNEL, tsk_fork_get_node(src));
+ return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
-@@ -129,7 +131,7 @@ void exit_thread(void)
- unsigned long *bp = t->io_bitmap_ptr;
++void arch_release_task_struct(struct task_struct *tsk)
++{
++ kmem_cache_free(fpregs_state_cachep, tsk->thread.fpu.state);
++ tsk->thread.fpu.state = NULL;
++}
++
+ /*
+ * Free current thread data structures etc..
+ */
+@@ -97,7 +118,7 @@ void exit_thread(void)
+ struct fpu *fpu = &t->fpu;
if (bp) {
- struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
@@ -26558,7 +27721,7 @@ index 9717437..44bc9aa 100644
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
-@@ -149,6 +151,9 @@ void flush_thread(void)
+@@ -117,6 +138,9 @@ void flush_thread(void)
{
struct task_struct *tsk = current;
@@ -26568,7 +27731,7 @@ index 9717437..44bc9aa 100644
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-@@ -302,7 +307,7 @@ static void __exit_idle(void)
+@@ -258,7 +282,7 @@ static void __exit_idle(void)
void exit_idle(void)
{
/* idle loop has pid 0 */
@@ -26577,7 +27740,7 @@ index 9717437..44bc9aa 100644
return;
__exit_idle();
}
-@@ -355,7 +360,7 @@ bool xen_set_default_idle(void)
+@@ -311,7 +335,7 @@ bool xen_set_default_idle(void)
return ret;
}
#endif
@@ -26586,7 +27749,7 @@ index 9717437..44bc9aa 100644
{
local_irq_disable();
/*
-@@ -533,16 +538,43 @@ static int __init idle_setup(char *str)
+@@ -488,16 +512,40 @@ static int __init idle_setup(char *str)
}
early_param("idle", idle_setup);
@@ -26631,17 +27794,14 @@ index 9717437..44bc9aa 100644
+
+ thread->sp0 ^= time;
+ load_sp0(cpu_tss + smp_processor_id(), thread);
-+
-+#ifdef CONFIG_X86_64
-+ this_cpu_write(kernel_stack, thread->sp0);
-+#endif
++ this_cpu_write(cpu_current_top_of_stack, thread->sp0);
+}
+#endif
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
-index 8ed2106..1345704 100644
+index f73c962..6589332 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
-@@ -64,6 +64,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
+@@ -63,6 +63,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return ((unsigned long *)tsk->thread.sp)[3];
@@ -26649,7 +27809,7 @@ index 8ed2106..1345704 100644
}
void __show_regs(struct pt_regs *regs, int all)
-@@ -76,16 +77,15 @@ void __show_regs(struct pt_regs *regs, int all)
+@@ -75,16 +76,15 @@ void __show_regs(struct pt_regs *regs, int all)
if (user_mode(regs)) {
sp = regs->sp;
ss = regs->ss & 0xffff;
@@ -26668,9 +27828,9 @@ index 8ed2106..1345704 100644
print_symbol("EIP is at %s\n", regs->ip);
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-@@ -132,21 +132,22 @@ void release_thread(struct task_struct *dead_task)
- int copy_thread(unsigned long clone_flags, unsigned long sp,
- unsigned long arg, struct task_struct *p)
+@@ -131,21 +131,22 @@ void release_thread(struct task_struct *dead_task)
+ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+ unsigned long arg, struct task_struct *p, unsigned long tls)
{
- struct pt_regs *childregs = task_pt_regs(p);
+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
@@ -26695,16 +27855,16 @@ index 8ed2106..1345704 100644
childregs->fs = __KERNEL_PERCPU;
childregs->bx = sp; /* function */
childregs->bp = arg;
-@@ -244,7 +245,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct thread_struct *prev = &prev_p->thread,
- *next = &next_p->thread;
+@@ -245,7 +246,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct fpu *prev_fpu = &prev->fpu;
+ struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+ struct tss_struct *tss = cpu_tss + cpu;
- fpu_switch_t fpu;
+ fpu_switch_t fpu_switch;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-@@ -263,6 +264,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
*/
lazy_save_gs(prev->gs);
@@ -26715,42 +27875,38 @@ index 8ed2106..1345704 100644
/*
* Load the per-thread Thread-Local Storage descriptor.
*/
-@@ -306,12 +311,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -307,9 +312,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* current_thread_info().
*/
load_sp0(tss, next);
-- this_cpu_write(kernel_stack,
-- (unsigned long)task_stack_page(next_p) +
-- THREAD_SIZE);
- this_cpu_write(cpu_current_top_of_stack,
- (unsigned long)task_stack_page(next_p) +
- THREAD_SIZE);
+ this_cpu_write(current_task, next_p);
+ this_cpu_write(current_tinfo, &next_p->tinfo);
-+ this_cpu_write(kernel_stack, next->sp0);
+ this_cpu_write(cpu_current_top_of_stack, next->sp0);
/*
* Restore %gs if needed (which is common)
-@@ -321,8 +324,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -319,8 +324,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- switch_fpu_finish(next_p, fpu);
+ switch_fpu_finish(next_fpu, fpu_switch);
- this_cpu_write(current_task, next_p);
-
return prev_p;
}
-@@ -352,4 +353,3 @@ unsigned long get_wchan(struct task_struct *p)
+@@ -350,4 +353,3 @@ unsigned long get_wchan(struct task_struct *p)
} while (count++ < 16);
return 0;
}
-
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
-index ddfdbf7..625417c 100644
+index f6b9163..1ab8c96 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
-@@ -158,9 +158,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+@@ -157,9 +157,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct pt_regs *childregs;
struct task_struct *me = current;
@@ -26762,7 +27918,7 @@ index ddfdbf7..625417c 100644
set_tsk_thread_flag(p, TIF_FORK);
p->thread.io_bitmap_ptr = NULL;
-@@ -170,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
+@@ -169,6 +170,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
savesegment(es, p->thread.es);
savesegment(ds, p->thread.ds);
@@ -26771,16 +27927,16 @@ index ddfdbf7..625417c 100644
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(p->flags & PF_KTHREAD)) {
-@@ -275,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct thread_struct *prev = &prev_p->thread;
- struct thread_struct *next = &next_p->thread;
+@@ -276,7 +279,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ struct fpu *prev_fpu = &prev->fpu;
+ struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+ struct tss_struct *tss = cpu_tss + cpu;
unsigned fsindex, gsindex;
- fpu_switch_t fpu;
+ fpu_switch_t fpu_switch;
-@@ -326,6 +329,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -327,6 +330,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (unlikely(next->ds | prev->ds))
loadsegment(ds, next->ds);
@@ -26791,7 +27947,7 @@ index ddfdbf7..625417c 100644
/*
* Switch FS and GS.
*
-@@ -397,6 +404,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -398,6 +405,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
* Switch the PDA and FPU contexts.
*/
this_cpu_write(current_task, next_p);
@@ -26799,17 +27955,16 @@ index ddfdbf7..625417c 100644
/*
* If it were not for PREEMPT_ACTIVE we could guarantee that the
-@@ -409,8 +417,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+@@ -410,6 +418,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Reload esp0 and ss1. This changes current_thread_info(). */
load_sp0(tss, next);
-- this_cpu_write(kernel_stack,
-- (unsigned long)task_stack_page(next_p) + THREAD_SIZE);
-+ this_cpu_write(kernel_stack, next->sp0);
-
++ this_cpu_write(cpu_current_top_of_stack, next->sp0);
++
/*
* Now maybe reload the debug registers and handle I/O bitmaps
-@@ -508,12 +515,11 @@ unsigned long get_wchan(struct task_struct *p)
+ */
+@@ -506,12 +516,11 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack = (unsigned long)task_stack_page(p);
@@ -26825,7 +27980,7 @@ index ddfdbf7..625417c 100644
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
-index a7bc794..094ee8e 100644
+index 9be72bc..f4329c5 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -186,10 +186,10 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
@@ -27139,7 +28294,7 @@ index 98111b3..73ca125 100644
identity_mapped:
/* set return address to 0 if not preserving context */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
-index d74ac33..6d14941 100644
+index 80f874b..b3eff67 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -111,6 +111,7 @@
@@ -27166,7 +28321,7 @@ index d74ac33..6d14941 100644
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
-@@ -771,7 +774,7 @@ static void __init trim_bios_range(void)
+@@ -772,7 +775,7 @@ static void __init trim_bios_range(void)
* area (640->1Mb) as ram even though it is not.
* take them out.
*/
@@ -27175,16 +28330,16 @@ index d74ac33..6d14941 100644
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
-@@ -779,7 +782,7 @@ static void __init trim_bios_range(void)
+@@ -780,7 +783,7 @@ static void __init trim_bios_range(void)
/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
- u64 start = __pa_symbol(_text);
-+ u64 start = __pa_symbol(ktla_ktva(_text));
++ u64 start = __pa_symbol(ktla_ktva((unsigned long)_text));
u64 size = __pa_symbol(_end) - start;
/*
-@@ -860,8 +863,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+@@ -861,8 +864,8 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
void __init setup_arch(char **cmdline_p)
{
@@ -27195,15 +28350,16 @@ index d74ac33..6d14941 100644
early_reserve_initrd();
-@@ -959,16 +962,16 @@ void __init setup_arch(char **cmdline_p)
+@@ -960,16 +963,16 @@ void __init setup_arch(char **cmdline_p)
if (!boot_params.hdr.root_flags)
root_mountflags &= ~MS_RDONLY;
- init_mm.start_code = (unsigned long) _text;
- init_mm.end_code = (unsigned long) _etext;
-+ init_mm.start_code = ktla_ktva((unsigned long) _text);
-+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
- init_mm.end_data = (unsigned long) _edata;
+- init_mm.end_data = (unsigned long) _edata;
++ init_mm.start_code = ktla_ktva((unsigned long)_text);
++ init_mm.end_code = ktla_ktva((unsigned long)_etext);
++ init_mm.end_data = (unsigned long)_edata;
init_mm.brk = _brk_end;
mpx_mm_init(&init_mm);
@@ -27211,8 +28367,8 @@ index d74ac33..6d14941 100644
- code_resource.start = __pa_symbol(_text);
- code_resource.end = __pa_symbol(_etext)-1;
- data_resource.start = __pa_symbol(_etext);
-+ code_resource.start = __pa_symbol(ktla_ktva(_text));
-+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
++ code_resource.start = __pa_symbol(ktla_ktva((unsigned long)_text));
++ code_resource.end = __pa_symbol(ktla_ktva((unsigned long)_etext))-1;
+ data_resource.start = __pa_symbol(_sdata);
data_resource.end = __pa_symbol(_edata)-1;
bss_resource.start = __pa_symbol(__bss_start);
@@ -27294,7 +28450,7 @@ index e4fcb87..9c06c55 100644
* Up to this point, the boot CPU has been using .init.data
* area. Reload any changed state for the boot CPU.
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
-index e0fd5f47..b551e66 100644
+index 71820c4..ad16f6b 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -189,7 +189,7 @@ static unsigned long align_sigframe(unsigned long sp)
@@ -27306,7 +28462,7 @@ index e0fd5f47..b551e66 100644
#else /* !CONFIG_X86_32 */
sp = round_down(sp, 16) - 8;
#endif
-@@ -297,10 +297,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -298,10 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
}
if (current->mm->context.vdso)
@@ -27319,7 +28475,7 @@ index e0fd5f47..b551e66 100644
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
-@@ -314,7 +313,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
+@@ -315,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
@@ -27328,7 +28484,7 @@ index e0fd5f47..b551e66 100644
if (err)
return -EFAULT;
-@@ -361,8 +360,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -362,8 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. */
@@ -27341,7 +28497,7 @@ index e0fd5f47..b551e66 100644
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
put_user_ex(restorer, &frame->pretcode);
-@@ -374,7 +375,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+@@ -375,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
* reasons and because gdb uses it as a signature to notice
* signal handler stack frames.
*/
@@ -27350,7 +28506,7 @@ index e0fd5f47..b551e66 100644
} put_user_catch(err);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
-@@ -594,7 +595,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -611,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
{
int usig = ksig->sig;
sigset_t *set = sigmask_to_save();
@@ -27364,7 +28520,7 @@ index e0fd5f47..b551e66 100644
/* Set up the stack frame */
if (is_ia32_frame()) {
-@@ -605,7 +611,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
+@@ -622,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
} else if (is_x32_frame()) {
return x32_setup_rt_frame(ksig, cset, regs);
} else {
@@ -27374,10 +28530,10 @@ index e0fd5f47..b551e66 100644
}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
-index be8e1bd..a3d93fa 100644
+index 15aaa69..66103af 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
-@@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
+@@ -334,7 +334,7 @@ static int __init nonmi_ipi_setup(char *str)
__setup("nonmi_ipi", nonmi_ipi_setup);
@@ -27387,10 +28543,10 @@ index be8e1bd..a3d93fa 100644
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
-index 50e547e..d59d06a 100644
+index b1f3ed9c..b76221b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
-@@ -226,14 +226,17 @@ static void notrace start_secondary(void *unused)
+@@ -220,14 +220,17 @@ static void notrace start_secondary(void *unused)
enable_start_cpu0 = 0;
@@ -27412,29 +28568,26 @@ index 50e547e..d59d06a 100644
/*
* Check TSC synchronization with the BP:
*/
-@@ -782,18 +785,17 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
+@@ -808,16 +811,15 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
alternatives_enable_smp();
per_cpu(current_task, cpu) = idle;
+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
#ifdef CONFIG_X86_32
- /* Stack for startup_32 can be just as for start_secondary onwards */
+- /* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
- per_cpu(cpu_current_top_of_stack, cpu) =
- (unsigned long)task_stack_page(idle) + THREAD_SIZE;
-+ per_cpu(cpu_current_top_of_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
#else
clear_tsk_thread_flag(idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
#endif
-- per_cpu(kernel_stack, cpu) =
-- (unsigned long)task_stack_page(idle) + THREAD_SIZE;
-+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
++ per_cpu(cpu_current_top_of_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
}
/*
-@@ -814,9 +816,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+@@ -838,9 +840,11 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
unsigned long timeout;
idle->thread.sp = (unsigned long) (((struct pt_regs *)
@@ -27447,7 +28600,7 @@ index 50e547e..d59d06a 100644
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
-@@ -961,6 +965,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+@@ -992,6 +996,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
common_cpu_up(cpu, tidle);
@@ -27460,27 +28613,14 @@ index 50e547e..d59d06a 100644
+ KERNEL_PGD_PTRS);
+#endif
+
- err = do_boot_cpu(apicid, cpu, tidle);
- if (err) {
- pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
+ /*
+ * We have to walk the irq descriptors to setup the vector
+ * space for the cpu which comes online. Prevent irq
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
-index 9b4d51d..5d28b58 100644
+index 0ccb53a..fbc4759 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
-@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
- struct desc_struct *desc;
- unsigned long base;
-
-- seg &= ~7UL;
-+ seg >>= 3;
-
- mutex_lock(&child->mm->context.lock);
-- if (unlikely((seg >> 3) >= child->mm->context.size))
-+ if (unlikely(seg >= child->mm->context.size))
- addr = -1L; /* bogus selector, access would fault */
- else {
- desc = child->mm->context.ldt + seg;
-@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+@@ -44,7 +44,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
addr += base;
}
mutex_unlock(&child->mm->context.lock);
@@ -27490,7 +28630,7 @@ index 9b4d51d..5d28b58 100644
return addr;
}
-@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
+@@ -55,6 +56,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
@@ -27926,10 +29066,10 @@ index 1c113db..287b42e 100644
static int trace_irq_vector_refcount;
static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
-index 324ab52..0cfd2d05 100644
+index f579192..aed90b8 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
-@@ -68,7 +68,7 @@
+@@ -69,7 +69,7 @@
#include <asm/proto.h>
/* No need to be aligned, but done to keep all IDTs defined the same way. */
@@ -27938,7 +29078,7 @@ index 324ab52..0cfd2d05 100644
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
-@@ -77,7 +77,7 @@ asmlinkage int system_call(void);
+@@ -77,7 +77,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#endif
/* Must be page-aligned because the real IDT is used in a fixmap. */
@@ -28016,7 +29156,7 @@ index 324ab52..0cfd2d05 100644
#ifdef CONFIG_DOUBLEFAULT
df_debug(regs, error_code);
#endif
-@@ -475,11 +492,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
+@@ -473,11 +490,35 @@ do_general_protection(struct pt_regs *regs, long error_code)
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
@@ -28053,7 +29193,7 @@ index 324ab52..0cfd2d05 100644
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = X86_TRAP_GP;
-@@ -578,6 +619,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+@@ -576,6 +617,9 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
container_of(task_pt_regs(current),
struct bad_iret_stack, regs);
@@ -28064,7 +29204,7 @@ index 324ab52..0cfd2d05 100644
memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
-index 5054497..139f8f8 100644
+index 7437b41..45f6250 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -150,7 +150,7 @@ static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
@@ -28077,10 +29217,10 @@ index 5054497..139f8f8 100644
/*
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
-index 0b81ad6..fff670e 100644
+index 6647624..2056791 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
-@@ -986,7 +986,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
+@@ -978,7 +978,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
if (nleft != rasize) {
pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
@@ -28170,7 +29310,7 @@ index fc9db6e..2c5865d 100644
goto cannot_handle;
if ((segoffs >> 16) == BIOSSEG)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
-index 00bf300..129df8e 100644
+index 00bf300..03e1c3b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -26,6 +26,13 @@
@@ -28205,10 +29345,11 @@ index 00bf300..129df8e 100644
#ifdef CONFIG_SMP
percpu PT_LOAD FLAGS(6); /* RW_ */
#endif
+- init PT_LOAD FLAGS(7); /* RWE */
+-#endif
+ text.init PT_LOAD FLAGS(5); /* R_E */
+ text.exit PT_LOAD FLAGS(5); /* R_E */
- init PT_LOAD FLAGS(7); /* RWE */
--#endif
++ init PT_LOAD FLAGS(6); /* RW_ */
note PT_NOTE FLAGS(0); /* ___ */
}
@@ -28338,14 +29479,13 @@ index 00bf300..129df8e 100644
/*
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - .init.text - should
-@@ -190,12 +255,27 @@ SECTIONS
+@@ -190,12 +255,33 @@ SECTIONS
"per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
- INIT_TEXT_SECTION(PAGE_SIZE)
-#ifdef CONFIG_X86_64
- :init
--#endif
+ . = ALIGN(PAGE_SIZE);
+ init_begin = .;
+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
@@ -28353,8 +29493,7 @@ index 00bf300..129df8e 100644
+ INIT_TEXT
+ . = ALIGN(PAGE_SIZE);
+ } :text.init
-
-- INIT_DATA_SECTION(16)
++
+ /*
+ * .exit.text is discard at runtime, not link time, to deal with
+ * references from .altinstructions and .eh_frame
@@ -28362,7 +29501,14 @@ index 00bf300..129df8e 100644
+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
+ EXIT_TEXT
+ VMLINUX_SYMBOL(_einittext) = .;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(HPAGE_SIZE);
++#else
+ . = ALIGN(16);
+ #endif
+
+- INIT_DATA_SECTION(16)
+ } :text.exit
+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
+
@@ -28371,7 +29517,7 @@ index 00bf300..129df8e 100644
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
__x86_cpu_dev_start = .;
-@@ -266,19 +346,12 @@ SECTIONS
+@@ -266,19 +352,12 @@ SECTIONS
}
. = ALIGN(8);
@@ -28392,7 +29538,7 @@ index 00bf300..129df8e 100644
PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif
-@@ -297,16 +370,10 @@ SECTIONS
+@@ -297,16 +376,10 @@ SECTIONS
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
@@ -28410,7 +29556,7 @@ index 00bf300..129df8e 100644
/* BSS */
. = ALIGN(PAGE_SIZE);
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
-@@ -322,6 +389,7 @@ SECTIONS
+@@ -322,6 +395,7 @@ SECTIONS
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
@@ -28418,7 +29564,7 @@ index 00bf300..129df8e 100644
__brk_limit = .;
}
-@@ -348,13 +416,12 @@ SECTIONS
+@@ -348,13 +422,12 @@ SECTIONS
* for the boot processor.
*/
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
@@ -28433,62 +29579,8 @@ index 00bf300..129df8e 100644
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
-index 2dcc6ff..082dc7a 100644
---- a/arch/x86/kernel/vsyscall_64.c
-+++ b/arch/x86/kernel/vsyscall_64.c
-@@ -38,15 +38,13 @@
- #define CREATE_TRACE_POINTS
- #include "vsyscall_trace.h"
-
--static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
-+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
-
- static int __init vsyscall_setup(char *str)
- {
- if (str) {
- if (!strcmp("emulate", str))
- vsyscall_mode = EMULATE;
-- else if (!strcmp("native", str))
-- vsyscall_mode = NATIVE;
- else if (!strcmp("none", str))
- vsyscall_mode = NONE;
- else
-@@ -264,8 +262,7 @@ do_ret:
- return true;
-
- sigsegv:
-- force_sig(SIGSEGV, current);
-- return true;
-+ do_group_exit(SIGKILL);
- }
-
- /*
-@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
- static struct vm_area_struct gate_vma = {
- .vm_start = VSYSCALL_ADDR,
- .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
-- .vm_page_prot = PAGE_READONLY_EXEC,
-- .vm_flags = VM_READ | VM_EXEC,
-+ .vm_page_prot = PAGE_READONLY,
-+ .vm_flags = VM_READ,
- .vm_ops = &gate_vma_ops,
- };
-
-@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
- unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
-
- if (vsyscall_mode != NONE)
-- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-- vsyscall_mode == NATIVE
-- ? PAGE_KERNEL_VSYSCALL
-- : PAGE_KERNEL_VVAR);
-+ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
-
- BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
- (unsigned long)VSYSCALL_ADDR);
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
-index 37d8fa4..66e319a 100644
+index a0695be..33e180c 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
@@ -28500,19 +29592,19 @@ index 37d8fa4..66e319a 100644
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
-@@ -79,3 +77,7 @@ EXPORT_SYMBOL(___preempt_schedule);
- EXPORT_SYMBOL(___preempt_schedule_context);
- #endif
+@@ -77,3 +75,7 @@ EXPORT_SYMBOL(native_load_gs_index);
+ EXPORT_SYMBOL(___preempt_schedule);
+ EXPORT_SYMBOL(___preempt_schedule_notrace);
#endif
+
+#ifdef CONFIG_PAX_PER_CPU_PGD
+EXPORT_SYMBOL(cpu_pgd);
+#endif
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
-index 234b072..b7ab191 100644
+index 3839628..2e5b5b35 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
-@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
+@@ -92,7 +92,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
static int default_i8042_detect(void) { return 1; };
@@ -28521,80 +29613,29 @@ index 234b072..b7ab191 100644
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
-@@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
+@@ -108,7 +108,7 @@ struct x86_platform_ops x86_platform = {
EXPORT_SYMBOL_GPL(x86_platform);
#if defined(CONFIG_PCI_MSI)
-struct x86_msi_ops x86_msi = {
+struct x86_msi_ops x86_msi __read_only = {
.setup_msi_irqs = native_setup_msi_irqs,
- .compose_msi_msg = native_compose_msi_msg,
.teardown_msi_irq = native_teardown_msi_irq,
-@@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
+ .teardown_msi_irqs = default_teardown_msi_irqs,
+@@ -137,7 +137,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
}
#endif
-struct x86_io_apic_ops x86_io_apic_ops = {
+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
- .init = native_io_apic_init_mappings,
.read = native_io_apic_read,
- .write = native_io_apic_write,
-diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
-index 87a815b..727dbe6 100644
---- a/arch/x86/kernel/xsave.c
-+++ b/arch/x86/kernel/xsave.c
-@@ -168,18 +168,18 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
-
- /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
- sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
-- err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
-+ err = __copy_to_user(x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
-
- if (!use_xsave())
- return err;
-
-- err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
-+ err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + xstate_size));
-
- /*
- * Read the xstate_bv which we copied (directly from the cpu or
- * from the state in task struct) to the user buffers.
- */
-- err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-+ err |= __get_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
-
- /*
- * For legacy compatible, we always set FP/SSE bits in the bit
-@@ -194,7 +194,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
- */
- xstate_bv |= XSTATE_FPSSE;
-
-- err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-+ err |= __put_user(xstate_bv, (__u32 __user *)&x->xsave_hdr.xstate_bv);
-
- return err;
- }
-@@ -203,6 +203,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
- {
- int err;
-
-+ buf = (struct xsave_struct __user *)____m(buf);
- if (use_xsave())
- err = xsave_user(buf);
- else if (use_fxsr())
-@@ -313,6 +314,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
- */
- static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
- {
-+ buf = (void __user *)____m(buf);
- if (use_xsave()) {
- if ((unsigned long)buf % 64 || fx_only) {
- u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
+ .disable = native_disable_io_apic,
+ };
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
-index 1d08ad3..c6a4faf 100644
+index 2fbea25..9e0f8c7 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
-@@ -204,15 +204,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
+@@ -206,15 +206,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
@@ -28618,7 +29659,7 @@ index 1d08ad3..c6a4faf 100644
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
kvm_x86_ops->cpuid_update(vcpu);
-@@ -225,15 +230,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
+@@ -227,15 +232,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
@@ -28642,10 +29683,10 @@ index 1d08ad3..c6a4faf 100644
out:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 630bcb0..a7f6d9e 100644
+index e7a4fde..623af93 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
-@@ -3569,7 +3569,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
+@@ -3847,7 +3847,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
int cr = ctxt->modrm_reg;
u64 efer = 0;
@@ -28655,7 +29696,7 @@ index 630bcb0..a7f6d9e 100644
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 67d07e0..10769d5 100644
+index 2a5ca97..ce8577a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -56,7 +56,7 @@
@@ -28668,7 +29709,7 @@ index 67d07e0..10769d5 100644
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8*/
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
-index 6e6d115..43fecbf 100644
+index 0f67d7e..4b9fa11 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -343,7 +343,7 @@ retry_walk:
@@ -28681,10 +29722,10 @@ index 6e6d115..43fecbf 100644
goto error;
walker->ptep_user[walker->level - 1] = ptep_user;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 4911bf1..e7d3ed2 100644
+index 8e0c084..bdb9c3b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
-@@ -3577,7 +3577,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+@@ -3688,7 +3688,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
@@ -28696,7 +29737,7 @@ index 4911bf1..e7d3ed2 100644
load_TR_desc();
}
-@@ -3973,6 +3977,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -4084,6 +4088,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
#endif
@@ -28708,7 +29749,7 @@ index 4911bf1..e7d3ed2 100644
local_irq_disable();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index 2d73807..84a0e59 100644
+index 83b7b5c..26d8b1b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1440,12 +1440,12 @@ static void vmcs_write64(unsigned long field, u64 value)
@@ -28749,7 +29790,7 @@ index 2d73807..84a0e59 100644
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
vmx->loaded_vmcs->cpu = cpu;
-@@ -2233,7 +2241,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
+@@ -2232,7 +2240,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
* reads and returns guest's timestamp counter "register"
* guest_tsc = host_tsc + tsc_offset -- 21.3
*/
@@ -28758,7 +29799,7 @@ index 2d73807..84a0e59 100644
{
u64 host_tsc, tsc_offset;
-@@ -4467,7 +4475,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4459,7 +4467,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
@@ -28769,7 +29810,7 @@ index 2d73807..84a0e59 100644
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
-@@ -4494,7 +4505,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+@@ -4486,7 +4497,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
@@ -28778,7 +29819,7 @@ index 2d73807..84a0e59 100644
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
-@@ -6107,11 +6118,17 @@ static __init int hardware_setup(void)
+@@ -6097,11 +6108,17 @@ static __init int hardware_setup(void)
* page upon invalidation. No need to do anything if not
* using the APIC_ACCESS_ADDR VMCS field.
*/
@@ -28800,7 +29841,7 @@ index 2d73807..84a0e59 100644
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
-@@ -6122,14 +6139,16 @@ static __init int hardware_setup(void)
+@@ -6112,14 +6129,16 @@ static __init int hardware_setup(void)
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
@@ -28822,7 +29863,7 @@ index 2d73807..84a0e59 100644
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-@@ -6182,10 +6201,12 @@ static __init int hardware_setup(void)
+@@ -6172,10 +6191,12 @@ static __init int hardware_setup(void)
enable_pml = 0;
if (!enable_pml) {
@@ -28839,7 +29880,7 @@ index 2d73807..84a0e59 100644
}
return alloc_kvm_area();
-@@ -8230,6 +8251,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8378,6 +8399,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
@@ -28852,7 +29893,7 @@ index 2d73807..84a0e59 100644
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
-@@ -8282,6 +8309,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8430,6 +8457,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
@@ -28864,7 +29905,7 @@ index 2d73807..84a0e59 100644
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
-@@ -8295,7 +8327,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8443,7 +8475,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
@@ -28873,7 +29914,7 @@ index 2d73807..84a0e59 100644
/*
* The sysexit path does not restore ds/es, so we must set them to
* a reasonable value ourselves.
-@@ -8304,8 +8336,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -8452,8 +8484,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
* may be executed in interrupt context, which saves and restore segments
* around it, nullifying its effect.
*/
@@ -28895,10 +29936,10 @@ index 2d73807..84a0e59 100644
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index ea306ad..669f42d 100644
+index 8f0f6ec..9cee69e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -1929,8 +1929,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
+@@ -1842,8 +1842,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
@@ -28909,7 +29950,7 @@ index ea306ad..669f42d 100644
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
-@@ -2867,6 +2867,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
+@@ -2731,6 +2731,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
@@ -28918,7 +29959,43 @@ index ea306ad..669f42d 100644
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
-@@ -5784,7 +5786,7 @@ static struct notifier_block pvclock_gtod_notifier = {
+@@ -3091,7 +3093,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+
+ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+ {
+- struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
++ struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = xsave->header.xfeatures;
+ u64 valid;
+
+@@ -3127,7 +3129,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+
+ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
+ {
+- struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
++ struct xregs_state *xsave = &vcpu->arch.guest_fpu.state->xsave;
+ u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
+ u64 valid;
+
+@@ -3171,7 +3173,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ fill_xsave((u8 *) guest_xsave->region, vcpu);
+ } else {
+ memcpy(guest_xsave->region,
+- &vcpu->arch.guest_fpu.state.fxsave,
++ &vcpu->arch.guest_fpu.state->fxsave,
+ sizeof(struct fxregs_state));
+ *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
+ XSTATE_FPSSE;
+@@ -3196,7 +3198,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
+ } else {
+ if (xstate_bv & ~XSTATE_FPSSE)
+ return -EINVAL;
+- memcpy(&vcpu->arch.guest_fpu.state.fxsave,
++ memcpy(&vcpu->arch.guest_fpu.state->fxsave,
+ guest_xsave->region, sizeof(struct fxregs_state));
+ }
+ return 0;
+@@ -5786,7 +5788,7 @@ static struct notifier_block pvclock_gtod_notifier = {
};
#endif
@@ -28927,8 +30004,94 @@ index ea306ad..669f42d 100644
{
int r;
struct kvm_x86_ops *ops = opaque;
+@@ -7210,7 +7212,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+ struct fxregs_state *fxsave =
+- &vcpu->arch.guest_fpu.state.fxsave;
++ &vcpu->arch.guest_fpu.state->fxsave;
+
+ memcpy(fpu->fpr, fxsave->st_space, 128);
+ fpu->fcw = fxsave->cwd;
+@@ -7227,7 +7229,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+ {
+ struct fxregs_state *fxsave =
+- &vcpu->arch.guest_fpu.state.fxsave;
++ &vcpu->arch.guest_fpu.state->fxsave;
+
+ memcpy(fxsave->st_space, fpu->fpr, 128);
+ fxsave->cwd = fpu->fcw;
+@@ -7243,9 +7245,9 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+
+ static void fx_init(struct kvm_vcpu *vcpu)
+ {
+- fpstate_init(&vcpu->arch.guest_fpu.state);
++ fpstate_init(vcpu->arch.guest_fpu.state);
+ if (cpu_has_xsaves)
+- vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
++ vcpu->arch.guest_fpu.state->xsave.header.xcomp_bv =
+ host_xcr0 | XSTATE_COMPACTION_ENABLED;
+
+ /*
+@@ -7269,7 +7271,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ kvm_put_guest_xcr0(vcpu);
+ vcpu->guest_fpu_loaded = 1;
+ __kernel_fpu_begin();
+- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
++ __copy_kernel_to_fpregs(vcpu->arch.guest_fpu.state);
+ trace_kvm_fpu(1);
+ }
+
+@@ -7547,6 +7549,8 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+
+ struct static_key kvm_no_apic_vcpu __read_mostly;
+
++extern struct kmem_cache *fpregs_state_cachep;
++
+ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ {
+ struct page *page;
+@@ -7563,11 +7567,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+ else
+ vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+
+- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+- if (!page) {
+- r = -ENOMEM;
++ r = -ENOMEM;
++ vcpu->arch.guest_fpu.state = kmem_cache_alloc(fpregs_state_cachep, GFP_KERNEL);
++ if (!vcpu->arch.guest_fpu.state)
+ goto fail;
+- }
++
++ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
++ if (!page)
++ goto fail_free_fpregs;
+ vcpu->arch.pio_data = page_address(page);
+
+ kvm_set_tsc_khz(vcpu, max_tsc_khz);
+@@ -7621,6 +7628,9 @@ fail_mmu_destroy:
+ kvm_mmu_destroy(vcpu);
+ fail_free_pio_data:
+ free_page((unsigned long)vcpu->arch.pio_data);
++fail_free_fpregs:
++ kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state);
++ vcpu->arch.guest_fpu.state = NULL;
+ fail:
+ return r;
+ }
+@@ -7638,6 +7648,8 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+ free_page((unsigned long)vcpu->arch.pio_data);
+ if (!irqchip_in_kernel(vcpu->kvm))
+ static_key_slow_dec(&kvm_no_apic_vcpu);
++ kmem_cache_free(fpregs_state_cachep, vcpu->arch.guest_fpu.state);
++ vcpu->arch.guest_fpu.state = NULL;
+ }
+
+ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
-index 8f9a133..3c7694b 100644
+index f2dc08c..d85d906 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1341,9 +1341,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
@@ -28944,10 +30107,10 @@ index 8f9a133..3c7694b 100644
/*G:050
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
-index 00933d5..3a64af9 100644
+index 9b0ca8f..bb4af41 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
-@@ -48,6 +48,10 @@ BEGIN(read)
+@@ -45,6 +45,10 @@ BEGIN(read)
movl (v), %eax
movl 4(v), %edx
RET_ENDP
@@ -28958,7 +30121,7 @@ index 00933d5..3a64af9 100644
#undef v
#define v %esi
-@@ -55,6 +59,10 @@ BEGIN(set)
+@@ -52,6 +56,10 @@ BEGIN(set)
movl %ebx, (v)
movl %ecx, 4(v)
RET_ENDP
@@ -28969,7 +30132,7 @@ index 00933d5..3a64af9 100644
#undef v
#define v %esi
-@@ -70,6 +78,20 @@ RET_ENDP
+@@ -67,6 +75,20 @@ RET_ENDP
BEGIN(add)
addl %eax, (v)
adcl %edx, 4(v)
@@ -28990,7 +30153,7 @@ index 00933d5..3a64af9 100644
RET_ENDP
#undef v
-@@ -77,6 +99,24 @@ RET_ENDP
+@@ -74,6 +96,24 @@ RET_ENDP
BEGIN(add_return)
addl (v), %eax
adcl 4(v), %edx
@@ -29015,7 +30178,7 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
-@@ -86,6 +126,20 @@ RET_ENDP
+@@ -83,6 +123,20 @@ RET_ENDP
BEGIN(sub)
subl %eax, (v)
sbbl %edx, 4(v)
@@ -29036,7 +30199,7 @@ index 00933d5..3a64af9 100644
RET_ENDP
#undef v
-@@ -96,6 +150,27 @@ BEGIN(sub_return)
+@@ -93,6 +147,27 @@ BEGIN(sub_return)
sbbl $0, %edx
addl (v), %eax
adcl 4(v), %edx
@@ -29064,7 +30227,7 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
-@@ -105,6 +180,20 @@ RET_ENDP
+@@ -102,6 +177,20 @@ RET_ENDP
BEGIN(inc)
addl $1, (v)
adcl $0, 4(v)
@@ -29085,7 +30248,7 @@ index 00933d5..3a64af9 100644
RET_ENDP
#undef v
-@@ -114,6 +203,26 @@ BEGIN(inc_return)
+@@ -111,6 +200,26 @@ BEGIN(inc_return)
movl 4(v), %edx
addl $1, %eax
adcl $0, %edx
@@ -29112,7 +30275,7 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
-@@ -123,6 +232,20 @@ RET_ENDP
+@@ -120,6 +229,20 @@ RET_ENDP
BEGIN(dec)
subl $1, (v)
sbbl $0, 4(v)
@@ -29133,7 +30296,7 @@ index 00933d5..3a64af9 100644
RET_ENDP
#undef v
-@@ -132,6 +255,26 @@ BEGIN(dec_return)
+@@ -129,6 +252,26 @@ BEGIN(dec_return)
movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
@@ -29160,7 +30323,7 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
RET_ENDP
-@@ -143,6 +286,13 @@ BEGIN(add_unless)
+@@ -140,6 +283,13 @@ BEGIN(add_unless)
adcl %edx, %edi
addl (v), %eax
adcl 4(v), %edx
@@ -29174,7 +30337,7 @@ index 00933d5..3a64af9 100644
cmpl %eax, %ecx
je 3f
1:
-@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
+@@ -165,6 +315,13 @@ BEGIN(inc_not_zero)
1:
addl $1, %eax
adcl $0, %edx
@@ -29188,7 +30351,7 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
movl $1, %eax
-@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
+@@ -183,6 +340,13 @@ BEGIN(dec_if_positive)
movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
@@ -29203,42 +30366,35 @@ index 00933d5..3a64af9 100644
movl %eax, (v)
movl %edx, 4(v)
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
-index 082a851..6a963bc 100644
+index db3ae854..b8ad0de 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
-@@ -25,10 +25,20 @@ ENTRY(atomic64_read_cx8)
- CFI_STARTPROC
+@@ -22,9 +22,16 @@
+ ENTRY(atomic64_read_cx8)
read64 %ecx
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(atomic64_read_cx8)
+ENTRY(atomic64_read_unchecked_cx8)
-+ CFI_STARTPROC
-+
+ read64 %ecx
+ pax_force_retaddr
+ ret
-+ CFI_ENDPROC
+ENDPROC(atomic64_read_unchecked_cx8)
+
ENTRY(atomic64_set_cx8)
- CFI_STARTPROC
-
-@@ -38,10 +48,25 @@ ENTRY(atomic64_set_cx8)
+ 1:
+ /* we don't need LOCK_PREFIX since aligned 64-bit writes
+@@ -32,20 +39,33 @@ ENTRY(atomic64_set_cx8)
cmpxchg8b (%esi)
jne 1b
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(atomic64_set_cx8)
+ENTRY(atomic64_set_unchecked_cx8)
-+ CFI_STARTPROC
-+
+1:
+/* we don't need LOCK_PREFIX since aligned 64-bit writes
+ * are atomic on 586 and newer */
@@ -29247,29 +30403,26 @@ index 082a851..6a963bc 100644
+
+ pax_force_retaddr
+ ret
-+ CFI_ENDPROC
+ENDPROC(atomic64_set_unchecked_cx8)
+
ENTRY(atomic64_xchg_cx8)
- CFI_STARTPROC
-
-@@ -50,12 +75,13 @@ ENTRY(atomic64_xchg_cx8)
+ 1:
+ LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(atomic64_xchg_cx8)
-.macro addsub_return func ins insc
-ENTRY(atomic64_\func\()_return_cx8)
+.macro addsub_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
- CFI_STARTPROC
- pushl_cfi_reg ebp
- pushl_cfi_reg ebx
-@@ -72,27 +98,44 @@ ENTRY(atomic64_\func\()_return_cx8)
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -61,26 +81,43 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %edx, %ecx
\ins\()l %esi, %ebx
\insc\()l %edi, %ecx
@@ -29296,13 +30449,12 @@ index 082a851..6a963bc 100644
+#endif
+.endif
+
- popl_cfi_reg edi
- popl_cfi_reg esi
- popl_cfi_reg ebx
- popl_cfi_reg ebp
+ popl %edi
+ popl %esi
+ popl %ebx
+ popl %ebp
+ pax_force_retaddr
ret
- CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
.endm
@@ -29316,10 +30468,10 @@ index 082a851..6a963bc 100644
-ENTRY(atomic64_\func\()_return_cx8)
+.macro incdec_return func ins insc unchecked=""
+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
- CFI_STARTPROC
- pushl_cfi_reg ebx
+ pushl %ebx
-@@ -102,21 +145,38 @@ ENTRY(atomic64_\func\()_return_cx8)
+ read64 %esi
+@@ -89,20 +126,37 @@ ENTRY(atomic64_\func\()_return_cx8)
movl %edx, %ecx
\ins\()l $1, %ebx
\insc\()l $0, %ecx
@@ -29346,10 +30498,9 @@ index 082a851..6a963bc 100644
+#endif
+.endif
+
- popl_cfi_reg ebx
+ popl %ebx
+ pax_force_retaddr
ret
- CFI_ENDPROC
-ENDPROC(atomic64_\func\()_return_cx8)
+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
.endm
@@ -29360,8 +30511,8 @@ index 082a851..6a963bc 100644
+incdec_return dec sub sbb _unchecked
ENTRY(atomic64_dec_if_positive_cx8)
- CFI_STARTPROC
-@@ -128,6 +188,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
+ pushl %ebx
+@@ -113,6 +167,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
movl %edx, %ecx
subl $1, %ebx
sbb $0, %ecx
@@ -29375,15 +30526,15 @@ index 082a851..6a963bc 100644
js 2f
LOCK_PREFIX
cmpxchg8b (%esi)
-@@ -137,6 +204,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
+@@ -122,6 +183,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
movl %ebx, %eax
movl %ecx, %edx
- popl_cfi_reg ebx
+ popl %ebx
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(atomic64_dec_if_positive_cx8)
-@@ -161,6 +229,13 @@ ENTRY(atomic64_add_unless_cx8)
+
+@@ -144,6 +206,13 @@ ENTRY(atomic64_add_unless_cx8)
movl %edx, %ecx
addl %ebp, %ebx
adcl %edi, %ecx
@@ -29397,15 +30548,15 @@ index 082a851..6a963bc 100644
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
-@@ -171,6 +246,7 @@ ENTRY(atomic64_add_unless_cx8)
- CFI_ADJUST_CFA_OFFSET -8
- popl_cfi_reg ebx
- popl_cfi_reg ebp
+@@ -153,6 +222,7 @@ ENTRY(atomic64_add_unless_cx8)
+ addl $8, %esp
+ popl %ebx
+ popl %ebp
+ pax_force_retaddr
ret
4:
cmpl %edx, 4(%esp)
-@@ -193,6 +269,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
+@@ -173,6 +243,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
xorl %ecx, %ecx
addl $1, %ebx
adcl %edx, %ecx
@@ -29419,20 +30570,19 @@ index 082a851..6a963bc 100644
LOCK_PREFIX
cmpxchg8b (%esi)
jne 1b
-@@ -200,6 +283,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
+@@ -180,5 +257,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
movl $1, %eax
3:
- popl_cfi_reg ebx
+ popl %ebx
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
-index 9bc944a..e52be6c 100644
+index c1e6232..758bc31 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
-@@ -29,7 +29,8 @@
- #include <asm/dwarf2.h>
+@@ -28,7 +28,8 @@
+ #include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
-
@@ -29441,34 +30591,29 @@ index 9bc944a..e52be6c 100644
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-@@ -285,9 +286,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
+@@ -280,7 +281,20 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
#define ARGBASE 16
#define FP 12
-
--ENTRY(csum_partial_copy_generic)
-+
-+ENTRY(csum_partial_copy_generic_to_user)
- CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %es
++ pushl %gs
++ popl %es
+ jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %ds
++ pushl %gs
++ popl %ds
+#endif
+
-+ENTRY(csum_partial_copy_generic)
+ ENTRY(csum_partial_copy_generic)
subl $4,%esp
- CFI_ADJUST_CFA_OFFSET 4
- pushl_cfi_reg edi
-@@ -306,7 +322,7 @@ ENTRY(csum_partial_copy_generic)
+ pushl %edi
+@@ -299,7 +313,7 @@ ENTRY(csum_partial_copy_generic)
jmp 4f
SRC(1: movw (%esi), %bx )
addl $2, %esi
@@ -29477,7 +30622,7 @@ index 9bc944a..e52be6c 100644
addl $2, %edi
addw %bx, %ax
adcl $0, %eax
-@@ -318,30 +334,30 @@ DST( movw %bx, (%edi) )
+@@ -311,30 +325,30 @@ DST( movw %bx, (%edi) )
SRC(1: movl (%esi), %ebx )
SRC( movl 4(%esi), %edx )
adcl %ebx, %eax
@@ -29516,7 +30661,7 @@ index 9bc944a..e52be6c 100644
lea 32(%esi), %esi
lea 32(%edi), %edi
-@@ -355,7 +371,7 @@ DST( movl %edx, 28(%edi) )
+@@ -348,7 +362,7 @@ DST( movl %edx, 28(%edi) )
shrl $2, %edx # This clears CF
SRC(3: movl (%esi), %ebx )
adcl %ebx, %eax
@@ -29525,7 +30670,7 @@ index 9bc944a..e52be6c 100644
lea 4(%esi), %esi
lea 4(%edi), %edi
dec %edx
-@@ -367,12 +383,12 @@ DST( movl %ebx, (%edi) )
+@@ -360,12 +374,12 @@ DST( movl %ebx, (%edi) )
jb 5f
SRC( movw (%esi), %cx )
leal 2(%esi), %esi
@@ -29540,7 +30685,7 @@ index 9bc944a..e52be6c 100644
6: addl %ecx, %eax
adcl $0, %eax
7:
-@@ -383,7 +399,7 @@ DST( movb %cl, (%edi) )
+@@ -376,7 +390,7 @@ DST( movb %cl, (%edi) )
6001:
movl ARGBASE+20(%esp), %ebx # src_err_ptr
@@ -29549,7 +30694,7 @@ index 9bc944a..e52be6c 100644
# zero the complete destination - computing the rest
# is too much work
-@@ -396,37 +412,58 @@ DST( movb %cl, (%edi) )
+@@ -389,34 +403,58 @@ DST( movb %cl, (%edi) )
6002:
movl ARGBASE+24(%esp), %ebx # dst_err_ptr
@@ -29559,16 +30704,18 @@ index 9bc944a..e52be6c 100644
.previous
-+ pushl_cfi %ss
-+ popl_cfi %ds
-+ pushl_cfi %ss
-+ popl_cfi %es
- popl_cfi_reg ebx
- popl_cfi_reg esi
- popl_cfi_reg edi
- popl_cfi %ecx # equivalent to addl $4,%esp
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pushl %ss
++ popl %ds
++ pushl %ss
++ popl %es
++#endif
++
+ popl %ebx
+ popl %esi
+ popl %edi
+ popl %ecx # equivalent to addl $4,%esp
ret
- CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
@@ -29592,29 +30739,26 @@ index 9bc944a..e52be6c 100644
#define ARGBASE 12
-
--ENTRY(csum_partial_copy_generic)
+
+ENTRY(csum_partial_copy_generic_to_user)
- CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %es
++ pushl %gs
++ popl %es
+ jmp csum_partial_copy_generic
+#endif
+
+ENTRY(csum_partial_copy_generic_from_user)
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %gs
-+ popl_cfi %ds
++ pushl %gs
++ popl %ds
+#endif
+
-+ENTRY(csum_partial_copy_generic)
- pushl_cfi_reg ebx
- pushl_cfi_reg edi
- pushl_cfi_reg esi
-@@ -444,7 +481,7 @@ ENTRY(csum_partial_copy_generic)
+ ENTRY(csum_partial_copy_generic)
+ pushl %ebx
+ pushl %edi
+@@ -435,7 +473,7 @@ ENTRY(csum_partial_copy_generic)
subl %ebx, %edi
lea -1(%esi),%edx
andl $-32,%edx
@@ -29623,7 +30767,7 @@ index 9bc944a..e52be6c 100644
testl %esi, %esi
jmp *%ebx
1: addl $64,%esi
-@@ -465,19 +502,19 @@ ENTRY(csum_partial_copy_generic)
+@@ -456,19 +494,19 @@ ENTRY(csum_partial_copy_generic)
jb 5f
SRC( movw (%esi), %dx )
leal 2(%esi), %esi
@@ -29646,7 +30790,7 @@ index 9bc944a..e52be6c 100644
# zero the complete destination (computing the rest is too much work)
movl ARGBASE+8(%esp),%edi # dst
movl ARGBASE+12(%esp),%ecx # len
-@@ -485,16 +522,23 @@ DST( movb %dl, (%edi) )
+@@ -476,15 +514,22 @@ DST( movb %dl, (%edi) )
rep; stosb
jmp 7b
6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
@@ -29656,101 +30800,95 @@ index 9bc944a..e52be6c 100644
.previous
+#ifdef CONFIG_PAX_MEMORY_UDEREF
-+ pushl_cfi %ss
-+ popl_cfi %ds
-+ pushl_cfi %ss
-+ popl_cfi %es
++ pushl %ss
++ popl %ds
++ pushl %ss
++ popl %es
+#endif
+
- popl_cfi_reg esi
- popl_cfi_reg edi
- popl_cfi_reg ebx
+ popl %esi
+ popl %edi
+ popl %ebx
ret
- CFI_ENDPROC
-ENDPROC(csum_partial_copy_generic)
+ENDPROC(csum_partial_copy_generic_to_user)
#undef ROUND
#undef ROUND1
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
-index e67e579..4782449 100644
+index a2fe51b..507dab0 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
-@@ -23,6 +23,7 @@ ENTRY(clear_page)
+@@ -21,6 +21,7 @@ ENTRY(clear_page)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(clear_page)
-@@ -47,6 +48,7 @@ ENTRY(clear_page_orig)
+
+@@ -43,6 +44,7 @@ ENTRY(clear_page_orig)
leaq 64(%rdi),%rdi
jnz .Lloop
nop
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(clear_page_orig)
-@@ -56,6 +58,7 @@ ENTRY(clear_page_c_e)
+
+@@ -50,5 +52,6 @@ ENTRY(clear_page_c_e)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(clear_page_c_e)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
-index 40a1725..5d12ac4 100644
+index 9b33024..e52ee44 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
-@@ -8,6 +8,7 @@
+@@ -7,6 +7,7 @@
+ */
#include <linux/linkage.h>
- #include <asm/dwarf2.h>
#include <asm/percpu.h>
+#include <asm/alternative-asm.h>
.text
-@@ -46,12 +47,14 @@ CFI_STARTPROC
- CFI_REMEMBER_STATE
- popfq_cfi
+@@ -43,11 +44,13 @@ ENTRY(this_cpu_cmpxchg16b_emu)
+
+ popfq
mov $1, %al
+ pax_force_retaddr
ret
- CFI_RESTORE_STATE
.Lnot_same:
- popfq_cfi
+ popfq
xor %al,%al
+ pax_force_retaddr
ret
- CFI_ENDPROC
+ ENDPROC(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
-index 8239dbc..e714d2a 100644
+index 009f982..9b3db5e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
-@@ -17,6 +17,7 @@ ENTRY(copy_page)
+@@ -15,13 +15,14 @@ ENTRY(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(copy_page)
-@@ -27,8 +28,8 @@ ENTRY(copy_page_regs)
- CFI_ADJUST_CFA_OFFSET 2*8
+
+ ENTRY(copy_page_regs)
+ subq $2*8, %rsp
movq %rbx, (%rsp)
- CFI_REL_OFFSET rbx, 0
- movq %r12, 1*8(%rsp)
-- CFI_REL_OFFSET r12, 1*8
+ movq %r13, 1*8(%rsp)
-+ CFI_REL_OFFSET r13, 1*8
movl $(4096/64)-5, %ecx
.p2align 4
-@@ -41,7 +42,7 @@ ENTRY(copy_page_regs)
+@@ -34,7 +35,7 @@ ENTRY(copy_page_regs)
movq 0x8*4(%rsi), %r9
movq 0x8*5(%rsi), %r10
movq 0x8*6(%rsi), %r11
@@ -29759,7 +30897,7 @@ index 8239dbc..e714d2a 100644
prefetcht0 5*64(%rsi)
-@@ -52,7 +53,7 @@ ENTRY(copy_page_regs)
+@@ -45,7 +46,7 @@ ENTRY(copy_page_regs)
movq %r9, 0x8*4(%rdi)
movq %r10, 0x8*5(%rdi)
movq %r11, 0x8*6(%rdi)
@@ -29768,7 +30906,7 @@ index 8239dbc..e714d2a 100644
leaq 64 (%rsi), %rsi
leaq 64 (%rdi), %rdi
-@@ -71,7 +72,7 @@ ENTRY(copy_page_regs)
+@@ -64,7 +65,7 @@ ENTRY(copy_page_regs)
movq 0x8*4(%rsi), %r9
movq 0x8*5(%rsi), %r10
movq 0x8*6(%rsi), %r11
@@ -29777,7 +30915,7 @@ index 8239dbc..e714d2a 100644
movq %rax, 0x8*0(%rdi)
movq %rbx, 0x8*1(%rdi)
-@@ -80,7 +81,7 @@ ENTRY(copy_page_regs)
+@@ -73,14 +74,15 @@ ENTRY(copy_page_regs)
movq %r9, 0x8*4(%rdi)
movq %r10, 0x8*5(%rdi)
movq %r11, 0x8*6(%rdi)
@@ -29786,39 +30924,26 @@ index 8239dbc..e714d2a 100644
leaq 64(%rdi), %rdi
leaq 64(%rsi), %rsi
-@@ -88,10 +89,11 @@ ENTRY(copy_page_regs)
+ jnz .Loop2
movq (%rsp), %rbx
- CFI_RESTORE rbx
- movq 1*8(%rsp), %r12
-- CFI_RESTORE r12
+ movq 1*8(%rsp), %r13
-+ CFI_RESTORE r13
addq $2*8, %rsp
- CFI_ADJUST_CFA_OFFSET -2*8
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
-index fa997df..060ab18 100644
+index 982ce34..8e14731 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
-@@ -15,6 +15,7 @@
+@@ -14,50 +14,7 @@
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
-+#include <asm/pgtable.h>
-
- .macro ALIGN_DESTINATION
- /* check for bad alignment of destination */
-@@ -40,56 +41,6 @@
- _ASM_EXTABLE(101b,103b)
- .endm
-
+-
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
-- CFI_STARTPROC
- GET_THREAD_INFO(%rax)
- movq %rdi,%rcx
- addq %rdx,%rcx
@@ -29830,12 +30955,10 @@ index fa997df..060ab18 100644
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-- CFI_ENDPROC
-ENDPROC(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
-- CFI_STARTPROC
- GET_THREAD_INFO(%rax)
- movq %rsi,%rcx
- addq %rdx,%rcx
@@ -29847,14 +30970,12 @@ index fa997df..060ab18 100644
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-- CFI_ENDPROC
-ENDPROC(_copy_from_user)
-
- .section .fixup,"ax"
- /* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
-- CFI_STARTPROC
- movl %edx,%ecx
- xorl %eax,%eax
- rep
@@ -29862,22 +30983,21 @@ index fa997df..060ab18 100644
-bad_to_user:
- movl %edx,%eax
- ret
-- CFI_ENDPROC
-ENDPROC(bad_from_user)
- .previous
--
++#include <asm/pgtable.h>
+
/*
* copy_user_generic_unrolled - memory copy with exception handling.
- * This version is for CPUs like P4 that don't have efficient micro
-@@ -105,6 +56,7 @@ ENDPROC(bad_from_user)
+@@ -73,6 +30,7 @@ ENDPROC(bad_from_user)
+ * eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_unrolled)
- CFI_STARTPROC
+ ASM_PAX_OPEN_USERLAND
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
-@@ -154,6 +106,8 @@ ENTRY(copy_user_generic_unrolled)
+@@ -122,6 +80,8 @@ ENTRY(copy_user_generic_unrolled)
jnz 21b
23: xor %eax,%eax
ASM_CLAC
@@ -29886,15 +31006,15 @@ index fa997df..060ab18 100644
ret
.section .fixup,"ax"
-@@ -209,6 +163,7 @@ ENDPROC(copy_user_generic_unrolled)
+@@ -175,6 +135,7 @@ ENDPROC(copy_user_generic_unrolled)
+ * eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_generic_string)
- CFI_STARTPROC
+ ASM_PAX_OPEN_USERLAND
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
-@@ -223,6 +178,8 @@ ENTRY(copy_user_generic_string)
+@@ -189,6 +150,8 @@ ENTRY(copy_user_generic_string)
movsb
xorl %eax,%eax
ASM_CLAC
@@ -29903,10 +31023,10 @@ index fa997df..060ab18 100644
ret
.section .fixup,"ax"
-@@ -250,12 +207,15 @@ ENDPROC(copy_user_generic_string)
+@@ -214,12 +177,15 @@ ENDPROC(copy_user_generic_string)
+ * eax uncopied bytes or 0 if successful.
*/
ENTRY(copy_user_enhanced_fast_string)
- CFI_STARTPROC
+ ASM_PAX_OPEN_USERLAND
ASM_STAC
movl %edx,%ecx
@@ -29919,30 +31039,10 @@ index fa997df..060ab18 100644
ret
.section .fixup,"ax"
-diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
-index 6a4f43c..c70fb52 100644
---- a/arch/x86/lib/copy_user_nocache_64.S
-+++ b/arch/x86/lib/copy_user_nocache_64.S
-@@ -8,6 +8,7 @@
-
- #include <linux/linkage.h>
- #include <asm/dwarf2.h>
-+#include <asm/alternative-asm.h>
-
- #define FIX_ALIGNMENT 1
-
-@@ -16,6 +17,7 @@
- #include <asm/thread_info.h>
- #include <asm/asm.h>
- #include <asm/smap.h>
-+#include <asm/pgtable.h>
-
- .macro ALIGN_DESTINATION
- #ifdef FIX_ALIGNMENT
-@@ -49,6 +51,16 @@
+@@ -235,6 +201,16 @@ ENDPROC(copy_user_enhanced_fast_string)
+ * This will force destination/source out of cache for more performance.
*/
ENTRY(__copy_user_nocache)
- CFI_STARTPROC
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+ mov pax_user_shadow_base,%rcx
@@ -29956,7 +31056,7 @@ index 6a4f43c..c70fb52 100644
ASM_STAC
cmpl $8,%edx
jb 20f /* less then 8 bytes, go to byte copy loop */
-@@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
+@@ -284,7 +260,9 @@ ENTRY(__copy_user_nocache)
jnz 21b
23: xorl %eax,%eax
ASM_CLAC
@@ -29967,29 +31067,27 @@ index 6a4f43c..c70fb52 100644
.section .fixup,"ax"
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
-index 9734182..dbee61c 100644
+index 7e48807..cc966ff 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
-@@ -9,6 +9,7 @@
- #include <asm/dwarf2.h>
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/asm.h>
+#include <asm/alternative-asm.h>
/*
* Checksum copy with exception handling.
-@@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
- CFI_ADJUST_CFA_OFFSET 7*8
+@@ -52,7 +53,7 @@ ENTRY(csum_partial_copy_generic)
+ .Lignore:
+ subq $7*8, %rsp
movq %rbx, 2*8(%rsp)
- CFI_REL_OFFSET rbx, 2*8
- movq %r12, 3*8(%rsp)
-- CFI_REL_OFFSET r12, 3*8
+ movq %r15, 3*8(%rsp)
-+ CFI_REL_OFFSET r15, 3*8
movq %r14, 4*8(%rsp)
- CFI_REL_OFFSET r14, 4*8
movq %r13, 5*8(%rsp)
-@@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
+ movq %rbp, 6*8(%rsp)
+@@ -64,16 +65,16 @@ ENTRY(csum_partial_copy_generic)
movl %edx, %ecx
xorl %r9d, %r9d
@@ -30009,7 +31107,7 @@ index 9734182..dbee61c 100644
/* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
.p2align 4
.Lloop:
-@@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
+@@ -107,7 +108,7 @@ ENTRY(csum_partial_copy_generic)
adcq %r14, %rax
adcq %r13, %rax
@@ -30018,25 +31116,20 @@ index 9734182..dbee61c 100644
dest
movq %rbx, (%rsi)
-@@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
+@@ -200,11 +201,12 @@ ENTRY(csum_partial_copy_generic)
+
.Lende:
movq 2*8(%rsp), %rbx
- CFI_RESTORE rbx
- movq 3*8(%rsp), %r12
-- CFI_RESTORE r12
+ movq 3*8(%rsp), %r15
-+ CFI_RESTORE r15
movq 4*8(%rsp), %r14
- CFI_RESTORE r14
movq 5*8(%rsp), %r13
-@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
- CFI_RESTORE rbp
+ movq 6*8(%rsp), %rbp
addq $7*8, %rsp
- CFI_ADJUST_CFA_OFFSET -7*8
+ pax_force_retaddr
ret
- CFI_RESTORE_STATE
+ /* Exception handlers. Very simple, zeroing is done in the wrappers */
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 1318f75..44c30fd 100644
--- a/arch/x86/lib/csum-wrappers_64.c
@@ -30070,10 +31163,10 @@ index 1318f75..44c30fd 100644
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
-index a451235..a74bfa3 100644
+index 46668cd..a3bdfb9 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
-@@ -33,17 +33,40 @@
+@@ -32,42 +32,93 @@
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/smap.h>
@@ -30089,7 +31182,6 @@ index a451235..a74bfa3 100644
.text
ENTRY(__get_user_1)
- CFI_STARTPROC
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
GET_THREAD_INFO(%_ASM_DX)
@@ -30113,11 +31205,9 @@ index a451235..a74bfa3 100644
ASM_CLAC
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(__get_user_1)
-@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
+
ENTRY(__get_user_2)
- CFI_STARTPROC
add $1,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
@@ -30143,11 +31233,9 @@ index a451235..a74bfa3 100644
ASM_CLAC
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(__get_user_2)
-@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
+
ENTRY(__get_user_4)
- CFI_STARTPROC
add $3,%_ASM_AX
+
+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
@@ -30173,9 +31261,9 @@ index a451235..a74bfa3 100644
ASM_CLAC
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(__get_user_4)
-@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
+
+@@ -78,10 +129,20 @@ ENTRY(__get_user_8)
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
@@ -30196,7 +31284,7 @@ index a451235..a74bfa3 100644
ret
#else
add $7,%_ASM_AX
-@@ -98,10 +159,11 @@ ENTRY(__get_user_8)
+@@ -90,10 +151,11 @@ ENTRY(__get_user_8)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user_8
ASM_STAC
@@ -30209,25 +31297,25 @@ index a451235..a74bfa3 100644
+ pax_force_retaddr
ret
#endif
- CFI_ENDPROC
-@@ -113,6 +175,7 @@ bad_get_user:
+ ENDPROC(__get_user_8)
+@@ -103,6 +165,7 @@ bad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
ASM_CLAC
+ pax_force_retaddr
ret
- CFI_ENDPROC
END(bad_get_user)
-@@ -124,6 +187,7 @@ bad_get_user_8:
+
+@@ -112,6 +175,7 @@ bad_get_user_8:
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
ASM_CLAC
+ pax_force_retaddr
ret
- CFI_ENDPROC
END(bad_get_user_8)
+ #endif
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
-index 8f72b33..a43d9969 100644
+index 8f72b33..4667a46 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -20,8 +20,10 @@
@@ -30248,37 +31336,36 @@ index 8f72b33..a43d9969 100644
- insn->kaddr = kaddr;
- insn->end_kaddr = kaddr + buf_len;
- insn->next_byte = kaddr;
-+ insn->kaddr = ktla_ktva(kaddr);
++ insn->kaddr = (void *)ktla_ktva((unsigned long)kaddr);
+ insn->end_kaddr = insn->kaddr + buf_len;
+ insn->next_byte = insn->kaddr;
insn->x86_64 = x86_64 ? 1 : 0;
insn->opnd_bytes = 4;
if (x86_64)
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
-index 05a95e7..326f2fa 100644
+index 33147fe..12a8815 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
-@@ -17,6 +17,7 @@
+@@ -16,6 +16,7 @@
+ */
#include <linux/linkage.h>
- #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
/*
* override generic version in lib/iomap_copy.c
-@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
- CFI_STARTPROC
+@@ -23,5 +24,6 @@
+ ENTRY(__iowrite32_copy)
movl %edx,%ecx
rep movsd
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
-index b046664..dec9465 100644
+index 16698bb..971d300 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
-@@ -37,6 +37,7 @@ ENTRY(memcpy)
+@@ -36,6 +36,7 @@ ENTRY(memcpy)
rep movsq
movl %edx, %ecx
rep movsb
@@ -30286,7 +31373,7 @@ index b046664..dec9465 100644
ret
ENDPROC(memcpy)
ENDPROC(__memcpy)
-@@ -49,6 +50,7 @@ ENTRY(memcpy_erms)
+@@ -48,6 +49,7 @@ ENTRY(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
@@ -30294,7 +31381,7 @@ index b046664..dec9465 100644
ret
ENDPROC(memcpy_erms)
-@@ -134,6 +136,7 @@ ENTRY(memcpy_orig)
+@@ -132,6 +134,7 @@ ENTRY(memcpy_orig)
movq %r9, 1*8(%rdi)
movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
@@ -30302,7 +31389,7 @@ index b046664..dec9465 100644
retq
.p2align 4
.Lless_16bytes:
-@@ -146,6 +149,7 @@ ENTRY(memcpy_orig)
+@@ -144,6 +147,7 @@ ENTRY(memcpy_orig)
movq -1*8(%rsi, %rdx), %r9
movq %r8, 0*8(%rdi)
movq %r9, -1*8(%rdi, %rdx)
@@ -30310,7 +31397,7 @@ index b046664..dec9465 100644
retq
.p2align 4
.Lless_8bytes:
-@@ -159,6 +163,7 @@ ENTRY(memcpy_orig)
+@@ -157,6 +161,7 @@ ENTRY(memcpy_orig)
movl -4(%rsi, %rdx), %r8d
movl %ecx, (%rdi)
movl %r8d, -4(%rdi, %rdx)
@@ -30318,19 +31405,18 @@ index b046664..dec9465 100644
retq
.p2align 4
.Lless_3bytes:
-@@ -177,6 +182,7 @@ ENTRY(memcpy_orig)
+@@ -175,5 +180,6 @@ ENTRY(memcpy_orig)
movb %cl, (%rdi)
.Lend:
+ pax_force_retaddr
retq
- CFI_ENDPROC
ENDPROC(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
-index 0f8a0d0..f6e0ea4 100644
+index ca2afdd..2e474fa 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
-@@ -43,7 +43,7 @@ ENTRY(__memmove)
+@@ -41,7 +41,7 @@ ENTRY(__memmove)
jg 2f
.Lmemmove_begin_forward:
@@ -30339,19 +31425,19 @@ index 0f8a0d0..f6e0ea4 100644
/*
* movsq instruction have many startup latency
-@@ -206,6 +206,7 @@ ENTRY(__memmove)
+@@ -204,6 +204,7 @@ ENTRY(__memmove)
movb (%rsi), %r11b
movb %r11b, (%rdi)
13:
+ pax_force_retaddr
retq
- CFI_ENDPROC
ENDPROC(__memmove)
+ ENDPROC(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
-index 93118fb..386ed2a 100644
+index 2661fad..b584d5c 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
-@@ -41,6 +41,7 @@ ENTRY(__memset)
+@@ -40,6 +40,7 @@ ENTRY(__memset)
movl %edx,%ecx
rep stosb
movq %r9,%rax
@@ -30359,7 +31445,7 @@ index 93118fb..386ed2a 100644
ret
ENDPROC(memset)
ENDPROC(__memset)
-@@ -62,6 +63,7 @@ ENTRY(memset_erms)
+@@ -61,6 +62,7 @@ ENTRY(memset_erms)
movq %rdx,%rcx
rep stosb
movq %r9,%rax
@@ -30367,16 +31453,16 @@ index 93118fb..386ed2a 100644
ret
ENDPROC(memset_erms)
-@@ -126,6 +128,7 @@ ENTRY(memset_orig)
+@@ -123,6 +125,7 @@ ENTRY(memset_orig)
.Lende:
movq %r10,%rax
+ pax_force_retaddr
ret
- CFI_RESTORE_STATE
+ .Lbad_alignment:
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
-index c9f2d9b..e7fd2c0 100644
+index e5e3ed8..d7c08c2 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
@@ -30695,30 +31781,30 @@ index c9f2d9b..e7fd2c0 100644
from += 64;
to += 64;
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
-index 3ca5218..c2ae6bc 100644
+index c815564..303dcfa 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
-@@ -3,6 +3,7 @@
- #include <asm/dwarf2.h>
+@@ -2,6 +2,7 @@
+ #include <linux/errno.h>
#include <asm/asm.h>
#include <asm/msr.h>
+#include <asm/alternative-asm.h>
#ifdef CONFIG_X86_64
/*
-@@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
+@@ -34,6 +35,7 @@ ENTRY(\op\()_safe_regs)
movl %edi, 28(%r10)
- popq_cfi_reg rbp
- popq_cfi_reg rbx
+ popq %rbp
+ popq %rbx
+ pax_force_retaddr
ret
3:
- CFI_RESTORE_STATE
+ movl $-EIO, %r11d
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
-index fc6ba17..14ad9a5 100644
+index e0817a1..bc9cf66 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
-@@ -16,7 +16,9 @@
+@@ -15,7 +15,9 @@
#include <asm/errno.h>
#include <asm/asm.h>
#include <asm/smap.h>
@@ -30729,19 +31815,16 @@ index fc6ba17..14ad9a5 100644
/*
* __put_user_X
-@@ -30,57 +32,125 @@
+@@ -29,55 +31,124 @@
* as they get called from within inline assembly.
*/
--#define ENTER CFI_STARTPROC ; \
-- GET_THREAD_INFO(%_ASM_BX)
+-#define ENTER GET_THREAD_INFO(%_ASM_BX)
-#define EXIT ASM_CLAC ; \
-- ret ; \
-+#define ENTER CFI_STARTPROC
++#define ENTER
+#define EXIT ASM_CLAC ; \
+ pax_force_retaddr ; \
-+ ret ; \
- CFI_ENDPROC
+ ret
+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
+#define _DEST %_ASM_CX,%_ASM_BX
@@ -30865,78 +31948,44 @@ index fc6ba17..14ad9a5 100644
xor %eax,%eax
EXIT
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
-index 2322abe..1e78a75 100644
+index 40027db..37bb69d 100644
--- a/arch/x86/lib/rwsem.S
+++ b/arch/x86/lib/rwsem.S
-@@ -92,6 +92,7 @@ ENTRY(call_rwsem_down_read_failed)
+@@ -90,6 +90,7 @@ ENTRY(call_rwsem_down_read_failed)
call rwsem_down_read_failed
- __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(call_rwsem_down_read_failed)
-@@ -102,6 +103,7 @@ ENTRY(call_rwsem_down_write_failed)
+
+@@ -98,6 +99,7 @@ ENTRY(call_rwsem_down_write_failed)
movq %rax,%rdi
call rwsem_down_write_failed
restore_common_regs
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(call_rwsem_down_write_failed)
-@@ -115,7 +117,8 @@ ENTRY(call_rwsem_wake)
+
+@@ -109,7 +111,8 @@ ENTRY(call_rwsem_wake)
movq %rax,%rdi
call rwsem_wake
restore_common_regs
-1: ret
+1: pax_force_retaddr
+ ret
- CFI_ENDPROC
ENDPROC(call_rwsem_wake)
-@@ -127,6 +130,7 @@ ENTRY(call_rwsem_downgrade_wake)
+ ENTRY(call_rwsem_downgrade_wake)
+@@ -119,5 +122,6 @@ ENTRY(call_rwsem_downgrade_wake)
call rwsem_downgrade_wake
- __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+ __ASM_SIZE(pop,) %__ASM_REG(dx)
restore_common_regs
+ pax_force_retaddr
ret
- CFI_ENDPROC
ENDPROC(call_rwsem_downgrade_wake)
-diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
-index f89ba4e9..512b2de 100644
---- a/arch/x86/lib/thunk_64.S
-+++ b/arch/x86/lib/thunk_64.S
-@@ -9,6 +9,7 @@
- #include <asm/dwarf2.h>
- #include <asm/calling.h>
- #include <asm/asm.h>
-+#include <asm/alternative-asm.h>
-
- /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
- .macro THUNK name, func, put_ret_addr_in_rdi=0
-@@ -69,6 +70,7 @@ restore:
- popq_cfi_reg rdx
- popq_cfi_reg rsi
- popq_cfi_reg rdi
-+ pax_force_retaddr
- ret
- CFI_ENDPROC
- _ASM_NOKPROBE(restore)
-diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
-index ddf9ecb..e342586 100644
---- a/arch/x86/lib/usercopy.c
-+++ b/arch/x86/lib/usercopy.c
-@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
- unsigned long ret;
-
- if (__range_not_ok(from, n, TASK_SIZE))
-- return 0;
-+ return n;
-
- /*
- * Even though this function is typically called from NMI/IRQ context
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
-index e2f5e21..4b22130 100644
+index 91d93b9..4b22130 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -42,11 +42,13 @@ do { \
@@ -31433,7 +32482,7 @@ index e2f5e21..4b22130 100644
clac();
return n;
}
-@@ -632,58 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
+@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
else
@@ -31454,7 +32503,8 @@ index e2f5e21..4b22130 100644
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
+- * Context: User context only. This function may sleep if pagefaults are
+- * enabled.
- *
- * Copy data from kernel space to user space.
- *
@@ -31491,7 +32541,8 @@ index e2f5e21..4b22130 100644
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
+- * Context: User context only. This function may sleep if pagefaults are
+- * enabled.
- *
- * Copy data from user space to kernel space.
- *
@@ -31580,6 +32631,54 @@ index 0a42327..7a82465 100644
memset(to, 0, len);
return len;
}
+diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c
+index dd76a05..df65688 100644
+--- a/arch/x86/math-emu/fpu_aux.c
++++ b/arch/x86/math-emu/fpu_aux.c
+@@ -52,7 +52,7 @@ void fpstate_init_soft(struct swregs_state *soft)
+
+ void finit(void)
+ {
+- fpstate_init_soft(&current->thread.fpu.state.soft);
++ fpstate_init_soft(&current->thread.fpu.state->soft);
+ }
+
+ /*
+diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
+index 3d8f2e4..ef7cf4e 100644
+--- a/arch/x86/math-emu/fpu_entry.c
++++ b/arch/x86/math-emu/fpu_entry.c
+@@ -677,7 +677,7 @@ int fpregs_soft_set(struct task_struct *target,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+ {
+- struct swregs_state *s387 = &target->thread.fpu.state.soft;
++ struct swregs_state *s387 = &target->thread.fpu.state->soft;
+ void *space = s387->st_space;
+ int ret;
+ int offset, other, i, tags, regnr, tag, newtop;
+@@ -729,7 +729,7 @@ int fpregs_soft_get(struct task_struct *target,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+ {
+- struct swregs_state *s387 = &target->thread.fpu.state.soft;
++ struct swregs_state *s387 = &target->thread.fpu.state->soft;
+ const void *space = s387->st_space;
+ int ret;
+ int offset = (S387->ftop & 7) * 10, other = 80 - offset;
+diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
+index 5e044d5..d342fce 100644
+--- a/arch/x86/math-emu/fpu_system.h
++++ b/arch/x86/math-emu/fpu_system.h
+@@ -46,7 +46,7 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+ #define SEG_EXPAND_DOWN(s) (((s).b & ((1 << 11) | (1 << 10))) \
+ == (1 << 10))
+
+-#define I387 (&current->thread.fpu.state)
++#define I387 (current->thread.fpu.state)
+ #define FPU_info (I387->soft.info)
+
+ #define FPU_CS (*(unsigned short *) &(FPU_info->regs->cs))
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index a482d10..1a6edb5 100644
--- a/arch/x86/mm/Makefile
@@ -31593,10 +32692,15 @@ index a482d10..1a6edb5 100644
+obj-$(CONFIG_X86_64) += uderef_64.o
+CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) -fcall-saved-rax
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
-index 903ec1e..c4166b2 100644
+index 903ec1e..41b4708 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
-@@ -6,12 +6,24 @@
+@@ -2,16 +2,29 @@
+ #include <linux/spinlock.h>
+ #include <linux/sort.h>
+ #include <asm/uaccess.h>
++#include <asm/boot.h>
+
static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
{
@@ -31623,7 +32727,7 @@ index 903ec1e..c4166b2 100644
}
int fixup_exception(struct pt_regs *regs)
-@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
+@@ -20,7 +33,7 @@ int fixup_exception(struct pt_regs *regs)
unsigned long new_ip;
#ifdef CONFIG_PNPBIOS
@@ -31632,7 +32736,7 @@ index 903ec1e..c4166b2 100644
extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
extern u32 pnp_bios_is_utter_crap;
pnp_bios_is_utter_crap = 1;
-@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
+@@ -145,6 +158,13 @@ void sort_extable(struct exception_table_entry *start,
i += 4;
p->fixup -= i;
i += 4;
@@ -31647,13 +32751,13 @@ index 903ec1e..c4166b2 100644
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
-index 181c53b..d336596 100644
+index 9dc9098..938251a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
-@@ -13,12 +13,19 @@
- #include <linux/hugetlb.h> /* hstate_index_to_shift */
+@@ -14,12 +14,19 @@
#include <linux/prefetch.h> /* prefetchw */
#include <linux/context_tracking.h> /* exception_enter(), ... */
+ #include <linux/uaccess.h> /* faulthandler_disabled() */
+#include <linux/unistd.h>
+#include <linux/compiler.h>
@@ -31670,7 +32774,7 @@ index 181c53b..d336596 100644
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
-@@ -120,7 +127,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+@@ -121,7 +128,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
return !instr_lo || (instr_lo>>1) == 1;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
@@ -31682,7 +32786,7 @@ index 181c53b..d336596 100644
return 0;
*prefetch = (instr_lo == 0xF) &&
-@@ -154,7 +164,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
+@@ -155,7 +165,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
while (instr < max_instr) {
unsigned char opcode;
@@ -31694,7 +32798,7 @@ index 181c53b..d336596 100644
break;
instr++;
-@@ -185,6 +198,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+@@ -186,6 +199,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
force_sig_info(si_signo, &info, tsk);
}
@@ -31729,7 +32833,7 @@ index 181c53b..d336596 100644
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
-@@ -235,10 +276,27 @@ void vmalloc_sync_all(void)
+@@ -236,10 +277,27 @@ void vmalloc_sync_all(void)
for (address = VMALLOC_START & PMD_MASK;
address >= TASK_SIZE && address < FIXADDR_TOP;
address += PMD_SIZE) {
@@ -31757,7 +32861,7 @@ index 181c53b..d336596 100644
spinlock_t *pgt_lock;
pmd_t *ret;
-@@ -246,8 +304,14 @@ void vmalloc_sync_all(void)
+@@ -247,8 +305,14 @@ void vmalloc_sync_all(void)
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
@@ -31773,7 +32877,7 @@ index 181c53b..d336596 100644
if (!ret)
break;
-@@ -281,6 +345,12 @@ static noinline int vmalloc_fault(unsigned long address)
+@@ -282,6 +346,12 @@ static noinline int vmalloc_fault(unsigned long address)
* an interrupt in the middle of a task switch..
*/
pgd_paddr = read_cr3();
@@ -31786,7 +32890,7 @@ index 181c53b..d336596 100644
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
if (!pmd_k)
return -1;
-@@ -377,11 +447,25 @@ static noinline int vmalloc_fault(unsigned long address)
+@@ -378,11 +448,25 @@ static noinline int vmalloc_fault(unsigned long address)
* happen within a race in page table update. In the later
* case just flush:
*/
@@ -31813,7 +32917,7 @@ index 181c53b..d336596 100644
if (pgd_none(*pgd)) {
set_pgd(pgd, *pgd_ref);
arch_flush_lazy_mmu_mode();
-@@ -548,7 +632,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
+@@ -549,7 +633,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
@@ -31822,7 +32926,7 @@ index 181c53b..d336596 100644
return 1;
#endif
return 0;
-@@ -575,9 +659,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
+@@ -576,9 +660,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
}
static const char nx_warning[] = KERN_CRIT
@@ -31834,7 +32938,7 @@ index 181c53b..d336596 100644
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-@@ -586,7 +670,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -587,7 +671,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
@@ -31843,7 +32947,7 @@ index 181c53b..d336596 100644
unsigned int level;
pgd_t *pgd;
pte_t *pte;
-@@ -597,13 +681,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+@@ -598,13 +682,25 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
pte = lookup_address_in_pgd(pgd, address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
@@ -31871,7 +32975,7 @@ index 181c53b..d336596 100644
printk(KERN_ALERT "BUG: unable to handle kernel ");
if (address < PAGE_SIZE)
printk(KERN_CONT "NULL pointer dereference");
-@@ -782,6 +878,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+@@ -783,6 +879,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
@@ -31894,7 +32998,7 @@ index 181c53b..d336596 100644
/* Kernel addresses are always protection faults: */
if (address >= TASK_SIZE)
error_code |= PF_PROT;
-@@ -864,7 +976,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
+@@ -865,7 +977,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
printk(KERN_ERR
"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
@@ -31903,7 +33007,7 @@ index 181c53b..d336596 100644
code = BUS_MCEERR_AR;
}
#endif
-@@ -916,6 +1028,107 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+@@ -917,6 +1029,107 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
return 1;
}
@@ -32011,7 +33115,7 @@ index 181c53b..d336596 100644
/*
* Handle a spurious fault caused by a stale TLB entry.
*
-@@ -1001,6 +1214,9 @@ int show_unhandled_signals = 1;
+@@ -1002,6 +1215,9 @@ int show_unhandled_signals = 1;
static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
@@ -32021,7 +33125,7 @@ index 181c53b..d336596 100644
if (error_code & PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
-@@ -1063,6 +1279,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
+@@ -1064,6 +1280,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
tsk = current;
mm = tsk->mm;
@@ -32044,7 +33148,7 @@ index 181c53b..d336596 100644
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
-@@ -1187,6 +1419,11 @@ retry:
+@@ -1188,6 +1420,11 @@ retry:
might_sleep();
}
@@ -32056,7 +33160,7 @@ index 181c53b..d336596 100644
vma = find_vma(mm, address);
if (unlikely(!vma)) {
bad_area(regs, error_code, address);
-@@ -1198,18 +1435,24 @@ retry:
+@@ -1199,18 +1436,24 @@ retry:
bad_area(regs, error_code, address);
return;
}
@@ -32092,7 +33196,7 @@ index 181c53b..d336596 100644
if (unlikely(expand_stack(vma, address))) {
bad_area(regs, error_code, address);
return;
-@@ -1329,3 +1572,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
+@@ -1330,3 +1573,292 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */
@@ -32410,17 +33514,15 @@ index 81bf3d2..7ef25c2 100644
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
-index 4500142..53a363c 100644
+index eecb207a..ad42a30 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
-@@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+@@ -45,7 +45,9 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
+
-+ pax_open_kernel();
set_pte(kmap_pte-idx, mk_pte(page, prot));
-+ pax_close_kernel();
+
arch_flush_lazy_mmu_mode();
@@ -32528,7 +33630,7 @@ index 42982b2..7168fc3 100644
#endif /* CONFIG_HUGETLB_PAGE */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
-index 1d55318..d58fd6a 100644
+index 8533b46..8c83176 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,7 @@
@@ -32609,7 +33711,7 @@ index 1d55318..d58fd6a 100644
if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
return 0;
if (!page_is_ram(pagenr))
-@@ -680,8 +724,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
+@@ -680,8 +724,127 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
#endif
}
@@ -32685,7 +33787,7 @@ index 1d55318..d58fd6a 100644
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+ }
+*/
+#endif
@@ -32696,16 +33798,23 @@ index 1d55318..d58fd6a 100644
+
+#else
+ /* PaX: make kernel code/rodata read-only, rest non-executable */
++ set_memory_ro((unsigned long)_text, ((unsigned long)(_sdata - _text) >> PAGE_SHIFT));
++ set_memory_nx((unsigned long)_sdata, (__START_KERNEL_map + KERNEL_IMAGE_SIZE - (unsigned long)_sdata) >> PAGE_SHIFT);
++
+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
+ pgd = pgd_offset_k(addr);
+ pud = pud_offset(pgd, addr);
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ continue;
++ if (addr >= (unsigned long)_text)
++ BUG_ON(!pmd_large(*pmd));
+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ BUG_ON(pmd_write(*pmd));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+ else
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
++ BUG_ON(!(pmd_flags(*pmd) & _PAGE_NX));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
+ }
+
+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
@@ -32716,8 +33825,11 @@ index 1d55318..d58fd6a 100644
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ continue;
++ if (addr >= (unsigned long)_text)
++ BUG_ON(!pmd_large(*pmd));
+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
-+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ BUG_ON(pmd_write(*pmd));
++// set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
+ }
+#endif
+
@@ -32728,7 +33840,7 @@ index 1d55318..d58fd6a 100644
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
-index c8140e1..59257fc 100644
+index 68aec42..95ad5d3 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
@@ -32794,7 +33906,7 @@ index c8140e1..59257fc 100644
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
int pgd_idx = pgd_index(vaddr);
-@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -209,6 +197,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
int pgd_idx, pmd_idx;
unsigned long vaddr;
pgd_t *pgd;
@@ -32802,7 +33914,7 @@ index c8140e1..59257fc 100644
pmd_t *pmd;
pte_t *pte = NULL;
unsigned long count = page_table_range_init_count(start, end);
-@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -223,8 +212,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
pgd = pgd_base + pgd_idx;
for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
@@ -32818,7 +33930,7 @@ index c8140e1..59257fc 100644
for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
pmd++, pmd_idx++) {
pte = page_table_kmap_check(one_page_table_init(pmd),
-@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
+@@ -236,11 +230,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
}
}
@@ -32843,7 +33955,7 @@ index c8140e1..59257fc 100644
}
/*
-@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
+@@ -257,9 +260,10 @@ kernel_physical_mapping_init(unsigned long start,
unsigned long last_map_addr = end;
unsigned long start_pfn, end_pfn;
pgd_t *pgd_base = swapper_pg_dir;
@@ -32855,7 +33967,7 @@ index c8140e1..59257fc 100644
pmd_t *pmd;
pte_t *pte;
unsigned pages_2m, pages_4k;
-@@ -291,8 +295,13 @@ repeat:
+@@ -292,8 +296,13 @@ repeat:
pfn = start_pfn;
pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pgd = pgd_base + pgd_idx;
@@ -32871,7 +33983,7 @@ index c8140e1..59257fc 100644
if (pfn >= end_pfn)
continue;
-@@ -304,14 +313,13 @@ repeat:
+@@ -305,14 +314,13 @@ repeat:
#endif
for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
pmd++, pmd_idx++) {
@@ -32887,7 +33999,7 @@ index c8140e1..59257fc 100644
pgprot_t prot = PAGE_KERNEL_LARGE;
/*
* first pass will use the same initial
-@@ -322,11 +330,7 @@ repeat:
+@@ -323,11 +331,7 @@ repeat:
_PAGE_PSE);
pfn &= PMD_MASK >> PAGE_SHIFT;
@@ -32900,7 +34012,7 @@ index c8140e1..59257fc 100644
prot = PAGE_KERNEL_LARGE_EXEC;
pages_2m++;
-@@ -343,7 +347,7 @@ repeat:
+@@ -344,7 +348,7 @@ repeat:
pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
pte += pte_ofs;
for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
@@ -32909,7 +34021,7 @@ index c8140e1..59257fc 100644
pgprot_t prot = PAGE_KERNEL;
/*
* first pass will use the same initial
-@@ -351,7 +355,7 @@ repeat:
+@@ -352,7 +356,7 @@ repeat:
*/
pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
@@ -32918,7 +34030,7 @@ index c8140e1..59257fc 100644
prot = PAGE_KERNEL_EXEC;
pages_4k++;
-@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
+@@ -475,7 +479,7 @@ void __init native_pagetable_init(void)
pud = pud_offset(pgd, va);
pmd = pmd_offset(pud, va);
@@ -32927,7 +34039,7 @@ index c8140e1..59257fc 100644
break;
/* should not be large page here */
-@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
+@@ -533,12 +537,10 @@ void __init early_ioremap_page_table_range_init(void)
static void __init pagetable_init(void)
{
@@ -32942,7 +34054,7 @@ index c8140e1..59257fc 100644
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* user-defined highmem size */
-@@ -787,10 +789,10 @@ void __init mem_init(void)
+@@ -788,10 +790,10 @@ void __init mem_init(void)
((unsigned long)&__init_end -
(unsigned long)&__init_begin) >> 10,
@@ -32956,7 +34068,7 @@ index c8140e1..59257fc 100644
((unsigned long)&_etext - (unsigned long)&_text) >> 10);
/*
-@@ -884,6 +886,7 @@ void set_kernel_text_rw(void)
+@@ -885,6 +887,7 @@ void set_kernel_text_rw(void)
if (!kernel_set_to_readonly)
return;
@@ -32964,7 +34076,7 @@ index c8140e1..59257fc 100644
pr_debug("Set kernel text: %lx - %lx for read write\n",
start, start+size);
-@@ -898,6 +901,7 @@ void set_kernel_text_ro(void)
+@@ -899,6 +902,7 @@ void set_kernel_text_ro(void)
if (!kernel_set_to_readonly)
return;
@@ -32972,7 +34084,7 @@ index c8140e1..59257fc 100644
pr_debug("Set kernel text: %lx - %lx for read only\n",
start, start+size);
-@@ -926,6 +930,7 @@ void mark_rodata_ro(void)
+@@ -927,6 +931,7 @@ void mark_rodata_ro(void)
unsigned long start = PFN_ALIGN(_text);
unsigned long size = PFN_ALIGN(_etext) - start;
@@ -32981,7 +34093,7 @@ index c8140e1..59257fc 100644
printk(KERN_INFO "Write protecting the kernel text: %luk\n",
size >> 10);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
-index 3fba623..5ee9802 100644
+index 3fba623..a5d0500 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -136,7 +136,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
@@ -33069,17 +34181,7 @@ index 3fba623..5ee9802 100644
if (pmd != pmd_offset(pud, 0))
printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
pmd, pmd_offset(pud, 0));
-@@ -275,7 +303,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
- pmd = fill_pmd(pud, vaddr);
- pte = fill_pte(pmd, vaddr);
-
-+ pax_open_kernel();
- set_pte(pte, new_pte);
-+ pax_close_kernel();
-
- /*
- * It's enough to flush this one mapping.
-@@ -337,14 +367,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
+@@ -337,14 +365,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
pgd = pgd_offset_k((unsigned long)__va(phys));
if (pgd_none(*pgd)) {
pud = (pud_t *) spp_getpage();
@@ -33096,7 +34198,7 @@ index 3fba623..5ee9802 100644
}
pmd = pmd_offset(pud, phys);
BUG_ON(!pmd_none(*pmd));
-@@ -585,7 +613,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
+@@ -585,7 +611,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
prot);
spin_lock(&init_mm.page_table_lock);
@@ -33105,7 +34207,7 @@ index 3fba623..5ee9802 100644
spin_unlock(&init_mm.page_table_lock);
}
__flush_tlb_all();
-@@ -626,7 +654,7 @@ kernel_physical_mapping_init(unsigned long start,
+@@ -626,7 +652,7 @@ kernel_physical_mapping_init(unsigned long start,
page_size_mask);
spin_lock(&init_mm.page_table_lock);
@@ -33114,27 +34216,11 @@ index 3fba623..5ee9802 100644
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
-diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
-index 9ca35fc..4b2b7b7 100644
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+
-+ pax_open_kernel();
- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+ pax_close_kernel();
-+
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
-index 70e7444..e9904fd 100644
+index b9c78f3..9ca7e24 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
-@@ -56,12 +56,10 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+@@ -59,8 +59,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
unsigned long i;
for (i = 0; i < nr_pages; ++i)
@@ -33144,50 +34230,8 @@ index 70e7444..e9904fd 100644
+ !PageReserved(pfn_to_page(start_pfn + i))))
return 1;
-- WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
--
return 0;
- }
-
-@@ -91,7 +89,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
- pgprot_t prot;
- int retval;
- void __iomem *ret_addr;
-- int ram_region;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
-@@ -114,23 +111,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
-- /* First check if whole region can be identified as RAM or not */
-- ram_region = region_is_ram(phys_addr, size);
-- if (ram_region > 0) {
-- WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-- (unsigned long int)phys_addr,
-- (unsigned long int)last_addr);
-+ pfn = phys_addr >> PAGE_SHIFT;
-+ last_pfn = last_addr >> PAGE_SHIFT;
-+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-+ __ioremap_check_ram) == 1) {
-+ WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
-+ phys_addr, last_addr);
- return NULL;
- }
-
-- /* If could not be identified(-1), check page by page */
-- if (ram_region < 0) {
-- pfn = phys_addr >> PAGE_SHIFT;
-- last_pfn = last_addr >> PAGE_SHIFT;
-- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-- __ioremap_check_ram) == 1)
-- return NULL;
-- }
- /*
- * Mappings have to be page-aligned
- */
-@@ -288,7 +277,7 @@ EXPORT_SYMBOL(ioremap_prot);
+@@ -332,7 +332,7 @@ EXPORT_SYMBOL(ioremap_prot);
*
* Caller must ensure there is only one unmapping for the same pointer.
*/
@@ -33196,13 +34240,13 @@ index 70e7444..e9904fd 100644
{
struct vm_struct *p, *o;
-@@ -351,32 +340,36 @@ int arch_ioremap_pmd_supported(void)
+@@ -395,31 +395,37 @@ int __init arch_ioremap_pmd_supported(void)
*/
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
- unsigned long start = phys & PAGE_MASK;
- unsigned long offset = phys & ~PAGE_MASK;
-- unsigned long vaddr;
+- void *vaddr;
+ phys_addr_t pfn = phys >> PAGE_SHIFT;
- /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
@@ -33217,12 +34261,12 @@ index 70e7444..e9904fd 100644
+ return __va(phys);
+ }
-- vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+- vaddr = ioremap_cache(start, PAGE_SIZE);
- /* Only add the offset on success and return NULL if the ioremap() failed: */
- if (vaddr)
- vaddr += offset;
-
-- return (void *)vaddr;
+- return vaddr;
+ return (void __force *)ioremap_cache(phys, 1);
}
@@ -33240,22 +34284,29 @@ index 70e7444..e9904fd 100644
+ }
- iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
-- return;
+ iounmap((void __iomem __force *)addr);
}
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
-+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
++static pte_t __bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
++static pte_t *bm_pte __read_only = __bm_pte;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
-@@ -412,8 +405,7 @@ void __init early_ioremap_init(void)
+@@ -455,8 +461,14 @@ void __init early_ioremap_init(void)
early_ioremap_setup();
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
- memset(bm_pte, 0, sizeof(bm_pte));
- pmd_populate_kernel(&init_mm, pmd, bm_pte);
-+ pmd_populate_user(&init_mm, pmd, bm_pte);
++ if (pmd_none(*pmd))
++#ifdef CONFIG_COMPAT_VDSO
++ pmd_populate_user(&init_mm, pmd, __bm_pte);
++#else
++ pmd_populate_kernel(&init_mm, pmd, __bm_pte);
++#endif
++ else
++ bm_pte = (pte_t *)pmd_page_vaddr(*pmd);
/*
* The boot-ioremap range spans multiple pmds, for which
@@ -33429,7 +34480,7 @@ index 4053bb5..b1ad3dc 100644
unsigned long uninitialized_var(pfn_align);
int i, nid;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
-index 89af288..05381957 100644
+index 727158c..91bc23b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -260,7 +260,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
@@ -33476,11 +34527,9 @@ index 89af288..05381957 100644
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
return prot;
-@@ -438,23 +447,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
- static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
+@@ -437,16 +446,28 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
/* change init_mm */
-+ pax_open_kernel();
set_pte_atomic(kpte, pte);
+
#ifdef CONFIG_X86_32
@@ -33509,106 +34558,78 @@ index 89af288..05381957 100644
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
set_pte_atomic((pte_t *)pmd, pte);
- }
- }
- #endif
-+ pax_close_kernel();
- }
+@@ -505,7 +526,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
+ * up accordingly.
+ */
+ old_pte = *kpte;
+- old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
++ old_prot = pte_pgprot(old_pte);
++ req_prot = pgprot_large_2_4k(old_prot);
- static int
+ pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
+ pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
-index 35af677..e7bf11f 100644
+index 188e3e0..5c75446 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
-@@ -89,7 +89,7 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
- unsigned long pg_flags = pg->flags & _PGMT_MASK;
-
- if (pg_flags == _PGMT_DEFAULT)
-- return -1;
-+ return _PAGE_CACHE_MODE_NUM;
- else if (pg_flags == _PGMT_WC)
- return _PAGE_CACHE_MODE_WC;
- else if (pg_flags == _PGMT_UC_MINUS)
-@@ -346,7 +346,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
-
- page = pfn_to_page(pfn);
- type = get_page_memtype(page);
-- if (type != -1) {
-+ if (type != _PAGE_CACHE_MODE_NUM) {
- pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
- start, end - 1, type, req_type);
- if (new_type)
-@@ -498,7 +498,7 @@ int free_memtype(u64 start, u64 end)
+@@ -588,7 +588,7 @@ int free_memtype(u64 start, u64 end)
if (!entry) {
- printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-- current->comm, current->pid, start, end - 1);
+ pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+- current->comm, current->pid, start, end - 1);
+ current->comm, task_pid_nr(current), start, end - 1);
return -EINVAL;
}
-@@ -532,10 +532,10 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
- page = pfn_to_page(paddr >> PAGE_SHIFT);
- rettype = get_page_memtype(page);
- /*
-- * -1 from get_page_memtype() implies RAM page is in its
-+ * _PAGE_CACHE_MODE_NUM from get_page_memtype() implies RAM page is in its
- * default state and not reserved, and hence of type WB
- */
-- if (rettype == -1)
-+ if (rettype == _PAGE_CACHE_MODE_NUM)
- rettype = _PAGE_CACHE_MODE_WB;
-
- return rettype;
-@@ -628,8 +628,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+@@ -711,8 +711,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
while (cursor < to) {
if (!devmem_is_allowed(pfn)) {
-- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
-- current->comm, from, to - 1);
-+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx), PAT prevents it\n",
-+ current->comm, from, to - 1, cursor);
+- pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
+- current->comm, from, to - 1);
++ pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx), PAT prevents it\n",
++ current->comm, from, to - 1, cursor);
return 0;
}
cursor += PAGE_SIZE;
-@@ -700,7 +700,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
+@@ -782,7 +782,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
+
if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
- printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
- "for [mem %#010Lx-%#010Lx]\n",
+ pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
- current->comm, current->pid,
+ current->comm, task_pid_nr(current),
cattr_name(pcm),
base, (unsigned long long)(base + size-1));
return -EINVAL;
-@@ -735,7 +735,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+@@ -817,7 +817,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
pcm = lookup_memtype(paddr);
if (want_pcm != pcm) {
- printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+ pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
- current->comm, current->pid,
+ current->comm, task_pid_nr(current),
cattr_name(want_pcm),
(unsigned long long)paddr,
(unsigned long long)(paddr + size - 1),
-@@ -757,7 +757,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+@@ -838,7 +838,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+ !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
free_memtype(paddr, paddr + size);
- printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
- " for [mem %#010Lx-%#010Lx], got %s\n",
-- current->comm, current->pid,
-+ current->comm, task_pid_nr(current),
- cattr_name(want_pcm),
- (unsigned long long)paddr,
- (unsigned long long)(paddr + size - 1),
+ pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
+- current->comm, current->pid,
++ current->comm, task_pid_nr(current),
+ cattr_name(want_pcm),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size - 1),
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
-index 6582adc..fcc5d0b 100644
+index 6393108..890adda 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -161,7 +161,7 @@ success:
failure:
- printk(KERN_INFO "%s:%d conflicting memory types "
-- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
-+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
- end, cattr_name(found_type), cattr_name(match->type));
+ pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+- current->comm, current->pid, start, end,
++ current->comm, task_pid_nr(current), start, end,
+ cattr_name(found_type), cattr_name(match->type));
return -EBUSY;
}
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
@@ -33661,7 +34682,7 @@ index 9f0614d..92ae64a 100644
p += get_opcode(p, &opcode);
for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
-index 0b97d2c..597bb38 100644
+index fb0a9dd..72a6e6f 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -98,10 +98,75 @@ static inline void pgd_list_del(pgd_t *pgd)
@@ -33949,46 +34970,6 @@ index 0b97d2c..597bb38 100644
pgd_dtor(pgd);
paravirt_pgd_free(mm, pgd);
_pgd_free(pgd);
-diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
-index 75cc097..79a097f 100644
---- a/arch/x86/mm/pgtable_32.c
-+++ b/arch/x86/mm/pgtable_32.c
-@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
- return;
- }
- pte = pte_offset_kernel(pmd, vaddr);
-+
-+ pax_open_kernel();
- if (pte_val(pteval))
- set_pte_at(&init_mm, vaddr, pte, pteval);
- else
- pte_clear(&init_mm, vaddr, pte);
-+ pax_close_kernel();
-
- /*
- * It's enough to flush this one mapping.
-diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
-index e666cbb..61788c45 100644
---- a/arch/x86/mm/physaddr.c
-+++ b/arch/x86/mm/physaddr.c
-@@ -10,7 +10,7 @@
- #ifdef CONFIG_X86_64
-
- #ifdef CONFIG_DEBUG_VIRTUAL
--unsigned long __phys_addr(unsigned long x)
-+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
- {
- unsigned long y = x - __START_KERNEL_map;
-
-@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
- #else
-
- #ifdef CONFIG_DEBUG_VIRTUAL
--unsigned long __phys_addr(unsigned long x)
-+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
- {
- unsigned long phys_addr = x - PAGE_OFFSET;
- /* VMALLOC_* aren't constants */
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf..f5f1828 100644
--- a/arch/x86/mm/setup_nx.c
@@ -34082,18 +35063,18 @@ index 0000000..3fda3f3
+EXPORT_SYMBOL(__pax_close_userland);
+#endif
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
-index 6440221..f84b5c7 100644
+index 4093216..44b6b83 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
-@@ -9,6 +9,7 @@
+@@ -8,6 +8,7 @@
+ * of the License.
*/
#include <linux/linkage.h>
- #include <asm/dwarf2.h>
+#include <asm/alternative-asm.h>
/*
* Calling convention :
-@@ -38,6 +39,7 @@ sk_load_word_positive_offset:
+@@ -37,6 +38,7 @@ sk_load_word_positive_offset:
jle bpf_slow_path_word
mov (SKBDATA,%rsi),%eax
bswap %eax /* ntohl() */
@@ -34101,7 +35082,7 @@ index 6440221..f84b5c7 100644
ret
sk_load_half:
-@@ -55,6 +57,7 @@ sk_load_half_positive_offset:
+@@ -54,6 +56,7 @@ sk_load_half_positive_offset:
jle bpf_slow_path_half
movzwl (SKBDATA,%rsi),%eax
rol $8,%ax # ntohs()
@@ -34109,7 +35090,7 @@ index 6440221..f84b5c7 100644
ret
sk_load_byte:
-@@ -69,6 +72,7 @@ sk_load_byte_positive_offset:
+@@ -68,6 +71,7 @@ sk_load_byte_positive_offset:
cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
jle bpf_slow_path_byte
movzbl (SKBDATA,%rsi),%eax
@@ -34117,7 +35098,7 @@ index 6440221..f84b5c7 100644
ret
/* rsi contains offset and can be scratched */
-@@ -90,6 +94,7 @@ bpf_slow_path_word:
+@@ -89,6 +93,7 @@ bpf_slow_path_word:
js bpf_error
mov - MAX_BPF_STACK + 32(%rbp),%eax
bswap %eax
@@ -34125,7 +35106,7 @@ index 6440221..f84b5c7 100644
ret
bpf_slow_path_half:
-@@ -98,12 +103,14 @@ bpf_slow_path_half:
+@@ -97,12 +102,14 @@ bpf_slow_path_half:
mov - MAX_BPF_STACK + 32(%rbp),%ax
rol $8,%ax
movzwl %ax,%eax
@@ -34140,7 +35121,7 @@ index 6440221..f84b5c7 100644
ret
#define sk_negative_common(SIZE) \
-@@ -126,6 +133,7 @@ sk_load_word_negative_offset:
+@@ -125,6 +132,7 @@ sk_load_word_negative_offset:
sk_negative_common(4)
mov (%rax), %eax
bswap %eax
@@ -34148,7 +35129,7 @@ index 6440221..f84b5c7 100644
ret
bpf_slow_path_half_neg:
-@@ -137,6 +145,7 @@ sk_load_half_negative_offset:
+@@ -136,6 +144,7 @@ sk_load_half_negative_offset:
mov (%rax),%ax
rol $8,%ax
movzwl %ax,%eax
@@ -34156,7 +35137,7 @@ index 6440221..f84b5c7 100644
ret
bpf_slow_path_byte_neg:
-@@ -146,6 +155,7 @@ sk_load_byte_negative_offset:
+@@ -145,6 +154,7 @@ sk_load_byte_negative_offset:
.globl sk_load_byte_negative_offset
sk_negative_common(1)
movzbl (%rax), %eax
@@ -34164,19 +35145,19 @@ index 6440221..f84b5c7 100644
ret
bpf_error:
-@@ -156,4 +166,5 @@ bpf_error:
+@@ -155,4 +165,5 @@ bpf_error:
mov - MAX_BPF_STACK + 16(%rbp),%r14
mov - MAX_BPF_STACK + 24(%rbp),%r15
leaveq
+ pax_force_retaddr
ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
-index ddeff48..877ead6 100644
+index be2e7a2..e6960dd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
-@@ -13,7 +13,11 @@
- #include <linux/if_vlan.h>
+@@ -14,7 +14,11 @@
#include <asm/cacheflush.h>
+ #include <linux/bpf.h>
+#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
+int bpf_jit_enable __read_only;
@@ -34186,7 +35167,7 @@ index ddeff48..877ead6 100644
/*
* assembly code in arch/x86/net/bpf_jit.S
-@@ -174,7 +178,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+@@ -176,7 +180,9 @@ static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
static void jit_fill_hole(void *area, unsigned int size)
{
/* fill whole space with int3 instructions */
@@ -34196,7 +35177,7 @@ index ddeff48..877ead6 100644
}
struct jit_context {
-@@ -924,7 +930,9 @@ common_load:
+@@ -1026,7 +1032,9 @@ common_load:
pr_err("bpf_jit_compile fatal error\n");
return -EFAULT;
}
@@ -34206,7 +35187,7 @@ index ddeff48..877ead6 100644
}
proglen += ilen;
addrs[i] = proglen;
-@@ -1001,7 +1009,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+@@ -1103,7 +1111,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (image) {
bpf_flush_icache(header, image + proglen);
@@ -34214,7 +35195,7 @@ index ddeff48..877ead6 100644
prog->bpf_func = (void *)image;
prog->jited = true;
}
-@@ -1014,12 +1021,8 @@ void bpf_jit_free(struct bpf_prog *fp)
+@@ -1116,12 +1123,8 @@ void bpf_jit_free(struct bpf_prog *fp)
unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
struct bpf_binary_header *header = (void *)addr;
@@ -34340,7 +35321,7 @@ index 71e8a67..6a313bb 100644
struct op_counter_config;
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
-index 852aa4c..71613f2 100644
+index 2706230..74b4d9f 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -258,7 +258,7 @@ int __init intel_mid_pci_init(void)
@@ -34353,7 +35334,7 @@ index 852aa4c..71613f2 100644
/* Continue with standard init */
return 1;
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
-index 5dc6ca5..25c03f5 100644
+index 9bd1154..e9d4656 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -51,7 +51,7 @@ struct irq_router {
@@ -34365,7 +35346,7 @@ index 5dc6ca5..25c03f5 100644
int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
void (*pcibios_disable_irq)(struct pci_dev *dev) = pirq_disable_irq;
-@@ -791,7 +791,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
+@@ -792,7 +792,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
return 0;
}
@@ -34374,7 +35355,7 @@ index 5dc6ca5..25c03f5 100644
{ PCI_VENDOR_ID_INTEL, intel_router_probe },
{ PCI_VENDOR_ID_AL, ali_router_probe },
{ PCI_VENDOR_ID_ITE, ite_router_probe },
-@@ -818,7 +818,7 @@ static struct pci_dev *pirq_router_dev;
+@@ -819,7 +819,7 @@ static struct pci_dev *pirq_router_dev;
static void __init pirq_find_router(struct irq_router *r)
{
struct irq_routing_table *rt = pirq_table;
@@ -34383,7 +35364,7 @@ index 5dc6ca5..25c03f5 100644
#ifdef CONFIG_PCI_BIOS
if (!rt->signature) {
-@@ -1091,7 +1091,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
+@@ -1092,7 +1092,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
return 0;
}
@@ -34943,7 +35924,7 @@ index 86d0f9e..6d499f4 100644
ENDPROC(efi_call)
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
-index 3005f0c..d06aeb0 100644
+index 01d54ea..ba1d71c 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -63,7 +63,7 @@ enum intel_mid_timer_options intel_mid_timer_options;
@@ -35014,7 +35995,7 @@ index aaca917..66eadbc 100644
return &tangier_ops;
}
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
-index 278e4da..55e8d8a 100644
+index 278e4da..35db1a9 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -55,7 +55,7 @@ static void __init imr_self_test_result(int res, const char *fmt, ...)
@@ -35022,7 +36003,7 @@ index 278e4da..55e8d8a 100644
static void __init imr_self_test(void)
{
- phys_addr_t base = virt_to_phys(&_text);
-+ phys_addr_t base = virt_to_phys(ktla_ktva(_text));
++ phys_addr_t base = virt_to_phys((void *)ktla_ktva((unsigned long)_text));
size_t size = virt_to_phys(&__end_rodata) - base;
const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
int ret;
@@ -35040,7 +36021,7 @@ index d6ee929..3637cb5 100644
.getproplen = olpc_dt_getproplen,
.getproperty = olpc_dt_getproperty,
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
-index 757678f..9895d9b 100644
+index 9ab5279..8ba4611 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -134,11 +134,8 @@ static void do_fpu_end(void)
@@ -35069,7 +36050,7 @@ index 757678f..9895d9b 100644
#endif
load_TR_desc(); /* This does ltr */
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
-index 0b7a63d..0d0f2c2 100644
+index 0b7a63d..dff2199 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -68,7 +68,13 @@ void __init setup_real_mode(void)
@@ -35077,7 +36058,7 @@ index 0b7a63d..0d0f2c2 100644
#ifdef CONFIG_X86_32
- trampoline_header->start = __pa_symbol(startup_32_smp);
-+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
++ trampoline_header->start = __pa_symbol(ktla_ktva((unsigned long)startup_32_smp));
+
+#ifdef CONFIG_PAX_KERNEXEC
+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
@@ -35236,7 +36217,7 @@ index 604a37e..e49702a 100644
relocs-objs := relocs_32.o relocs_64.o relocs_common.o
PHONY += relocs
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
-index 0c2fae8..88036b7 100644
+index 0c2fae8..88d7719 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -1,5 +1,7 @@
@@ -35404,6 +36385,15 @@ index 0c2fae8..88036b7 100644
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL_TYPE) {
+@@ -697,7 +745,7 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ * kernel data and does not require special treatment.
+ *
+ */
+-static int per_cpu_shndx = -1;
++static unsigned int per_cpu_shndx = ~0;
+ static Elf_Addr per_cpu_load_addr;
+
+ static void percpu_init(void)
@@ -830,6 +878,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
{
unsigned r_type = ELF32_R_TYPE(rel->r_info);
@@ -35451,10 +36441,10 @@ index 0c2fae8..88036b7 100644
percpu_init();
if (show_absolute_syms) {
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
-index f40281e..92728c9 100644
+index 744afdc..a0b8a0d 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
-@@ -21,7 +21,7 @@ static int __init gate_vma_init(void)
+@@ -20,7 +20,7 @@ static int __init gate_vma_init(void)
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -35464,10 +36454,10 @@ index f40281e..92728c9 100644
return 0;
}
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
-index 80ffa5b..a33bd15 100644
+index 48e3858..ab4458c 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
-@@ -260,7 +260,7 @@ out:
+@@ -261,7 +261,7 @@ out:
if (unlikely(task == current &&
!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
printk(KERN_ERR "get_tls_entry: task with pid %d got here "
@@ -35476,157 +36466,6 @@ index 80ffa5b..a33bd15 100644
}
return 0;
-diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
-index e970320..c006fea 100644
---- a/arch/x86/vdso/Makefile
-+++ b/arch/x86/vdso/Makefile
-@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
- sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-
--VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
- GCOV_PROFILE := n
-
-diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
-index 0224987..c7d65a5 100644
---- a/arch/x86/vdso/vdso2c.h
-+++ b/arch/x86/vdso/vdso2c.h
-@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
- unsigned long load_size = -1; /* Work around bogus warning */
- unsigned long mapping_size;
- ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
-- int i;
-+ unsigned int i;
- unsigned long j;
- ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
- *alt_sec = NULL;
-diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
-index e904c27..b9eaa03 100644
---- a/arch/x86/vdso/vdso32-setup.c
-+++ b/arch/x86/vdso/vdso32-setup.c
-@@ -14,6 +14,7 @@
- #include <asm/cpufeature.h>
- #include <asm/processor.h>
- #include <asm/vdso.h>
-+#include <asm/mman.h>
-
- #ifdef CONFIG_COMPAT_VDSO
- #define VDSO_DEFAULT 0
-diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
-index 1c9f750..cfddb1a 100644
---- a/arch/x86/vdso/vma.c
-+++ b/arch/x86/vdso/vma.c
-@@ -19,10 +19,7 @@
- #include <asm/page.h>
- #include <asm/hpet.h>
- #include <asm/desc.h>
--
--#if defined(CONFIG_X86_64)
--unsigned int __read_mostly vdso64_enabled = 1;
--#endif
-+#include <asm/mman.h>
-
- void __init init_vdso_image(const struct vdso_image *image)
- {
-@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
- .pages = no_pages,
- };
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ calculate_addr = false;
-+#endif
-+
- if (calculate_addr) {
- addr = vdso_addr(current->mm->start_stack,
- image->size - image->sym_vvar_start);
-@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
- down_write(&mm->mmap_sem);
-
- addr = get_unmapped_area(NULL, addr,
-- image->size - image->sym_vvar_start, 0, 0);
-+ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
- if (IS_ERR_VALUE(addr)) {
- ret = addr;
- goto up_fail;
- }
-
- text_start = addr - image->sym_vvar_start;
-- current->mm->context.vdso = (void __user *)text_start;
-+ mm->context.vdso = text_start;
-
- /*
- * MAYWRITE to allow gdb to COW and set breakpoints
-@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
- hpet_address >> PAGE_SHIFT,
- PAGE_SIZE,
- pgprot_noncached(PAGE_READONLY));
--
-- if (ret)
-- goto up_fail;
- }
- #endif
-
- up_fail:
- if (ret)
-- current->mm->context.vdso = NULL;
-+ current->mm->context.vdso = 0;
-
- up_write(&mm->mmap_sem);
- return ret;
-@@ -191,8 +190,8 @@ static int load_vdso32(void)
-
- if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
- current_thread_info()->sysenter_return =
-- current->mm->context.vdso +
-- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
-+ (void __force_user *)(current->mm->context.vdso +
-+ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
-
- return 0;
- }
-@@ -201,9 +200,6 @@ static int load_vdso32(void)
- #ifdef CONFIG_X86_64
- int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- {
-- if (!vdso64_enabled)
-- return 0;
--
- return map_vdso(&vdso_image_64, true);
- }
-
-@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
- int uses_interp)
- {
- #ifdef CONFIG_X86_X32_ABI
-- if (test_thread_flag(TIF_X32)) {
-- if (!vdso64_enabled)
-- return 0;
--
-+ if (test_thread_flag(TIF_X32))
- return map_vdso(&vdso_image_x32, true);
-- }
- #endif
-
- return load_vdso32();
-@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
- #endif
-
- #ifdef CONFIG_X86_64
--static __init int vdso_setup(char *s)
--{
-- vdso64_enabled = simple_strtoul(s, NULL, 0);
-- return 0;
--}
--__setup("vdso=", vdso_setup);
--#endif
--
--#ifdef CONFIG_X86_64
- static void vgetcpu_cpu_init(void *arg)
- {
- int cpu = smp_processor_id();
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 4841453..d59a203 100644
--- a/arch/x86/xen/Kconfig
@@ -35640,7 +36479,7 @@ index 4841453..d59a203 100644
This is the Linux Xen port. Enabling this will allow the
kernel to boot in a paravirtualized environment under the
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index a671e83..a9dc1d9 100644
+index 11d6fb4..c581662 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -125,8 +125,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
@@ -35681,7 +36520,7 @@ index a671e83..a9dc1d9 100644
BUG_ON(va & ~PAGE_MASK);
for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-@@ -1263,30 +1259,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
+@@ -1264,30 +1260,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
#endif
};
@@ -35719,7 +36558,7 @@ index a671e83..a9dc1d9 100644
{
if (pm_power_off)
pm_power_off();
-@@ -1439,8 +1435,11 @@ static void __ref xen_setup_gdt(int cpu)
+@@ -1440,8 +1436,11 @@ static void __ref xen_setup_gdt(int cpu)
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
pv_cpu_ops.load_gdt = xen_load_gdt_boot;
@@ -35733,7 +36572,7 @@ index a671e83..a9dc1d9 100644
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
pv_cpu_ops.load_gdt = xen_load_gdt;
-@@ -1555,7 +1554,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1557,7 +1556,17 @@ asmlinkage __visible void __init xen_start_kernel(void)
__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
/* Work out if we support NX */
@@ -35752,7 +36591,7 @@ index a671e83..a9dc1d9 100644
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
-@@ -1583,13 +1592,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+@@ -1585,13 +1594,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
machine_ops = xen_machine_ops;
@@ -35767,7 +36606,7 @@ index a671e83..a9dc1d9 100644
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
-index dd151b2..d5ab952 100644
+index dd151b2..d87e22e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
@@ -35791,7 +36630,7 @@ index dd151b2..d5ab952 100644
convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
-@@ -1860,11 +1864,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+@@ -1860,11 +1864,22 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
@@ -35804,6 +36643,10 @@ index dd151b2..d5ab952 100644
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
++ set_page_prot(level1_modules_pgt[0], PAGE_KERNEL_RO);
++ set_page_prot(level1_modules_pgt[1], PAGE_KERNEL_RO);
++ set_page_prot(level1_modules_pgt[2], PAGE_KERNEL_RO);
++ set_page_prot(level1_modules_pgt[3], PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt[0], PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt[1], PAGE_KERNEL_RO);
+ set_page_prot(level1_fixmap_pgt[2], PAGE_KERNEL_RO);
@@ -35811,7 +36654,7 @@ index dd151b2..d5ab952 100644
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-@@ -2048,6 +2059,7 @@ static void __init xen_post_allocator_init(void)
+@@ -2048,6 +2063,7 @@ static void __init xen_post_allocator_init(void)
pv_mmu_ops.set_pud = xen_set_pud;
#if CONFIG_PGTABLE_LEVELS == 4
pv_mmu_ops.set_pgd = xen_set_pgd;
@@ -35819,7 +36662,7 @@ index dd151b2..d5ab952 100644
#endif
/* This will work as long as patching hasn't happened yet
-@@ -2126,6 +2138,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
+@@ -2126,6 +2142,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.pud_val = PV_CALLEE_SAVE(xen_pud_val),
.make_pud = PV_CALLEE_SAVE(xen_make_pud),
.set_pgd = xen_set_pgd_hyper,
@@ -35916,7 +36759,7 @@ index 8afdfcc..79239db 100644
mov %rsi,xen_start_info
mov $init_thread_union+THREAD_SIZE,%rsp
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
-index bef30cb..f1a0d68 100644
+index 2292721..a9bb18e 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -16,8 +16,6 @@ void xen_syscall_target(void);
@@ -35967,10 +36810,10 @@ index 2f33760..835e50a 100644
#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
diff --git a/block/bio.c b/block/bio.c
-index 4441522..dedbafc 100644
+index d6e5ba3..2bb142c 100644
--- a/block/bio.c
+++ b/block/bio.c
-@@ -1172,7 +1172,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
+@@ -1187,7 +1187,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -35979,7 +36822,7 @@ index 4441522..dedbafc 100644
return ERR_PTR(-EINVAL);
nr_pages += end - start;
-@@ -1297,7 +1297,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+@@ -1312,7 +1312,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
/*
* Overflow, abort
*/
@@ -36081,10 +36924,10 @@ index f678c73..f35aa18 100644
err = -EFAULT;
goto out;
diff --git a/block/genhd.c b/block/genhd.c
-index ea982ea..86e0f9e 100644
+index 59a1395..54ff187 100644
--- a/block/genhd.c
+++ b/block/genhd.c
-@@ -469,21 +469,24 @@ static char *bdevt_str(dev_t devt, char *buf)
+@@ -470,21 +470,24 @@ static char *bdevt_str(dev_t devt, char *buf)
/*
* Register device numbers dev..(dev+range-1)
@@ -36138,7 +36981,7 @@ index 26cb624..a49c3a5 100644
(u8 *) pte, count) < count) {
kfree(pte);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
-index 55b6f15..b602c9a 100644
+index dda653c..028a13ee 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
@@ -36202,7 +37045,7 @@ index 55b6f15..b602c9a 100644
goto error;
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
-index b0602ba..fb71850 100644
+index 22ba81f..1acac67 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
@@ -36224,10 +37067,10 @@ index b0602ba..fb71850 100644
static void cryptd_queue_worker(struct work_struct *work);
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
-index c305d41..a96de79 100644
+index 45e7d51..2967121 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
-@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+@@ -385,7 +385,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
int ret;
pinst->kobj.kset = pcrypt_kset;
@@ -36237,10 +37080,10 @@ index c305d41..a96de79 100644
kobject_uevent(&pinst->kobj, KOBJ_ADD);
diff --git a/crypto/zlib.c b/crypto/zlib.c
-index 0eefa9d..0fa3d29 100644
+index d51a30a..fb1f8af 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
-@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, void *params,
+@@ -95,10 +95,10 @@ static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params,
zlib_comp_exit(ctx);
window_bits = tb[ZLIB_COMP_WINDOWBITS]
@@ -36253,11 +37096,24 @@ index 0eefa9d..0fa3d29 100644
: DEF_MEM_LEVEL;
workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
+diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
+index 8c2fe2f..fc47c12 100644
+--- a/drivers/acpi/acpi_video.c
++++ b/drivers/acpi/acpi_video.c
+@@ -398,7 +398,7 @@ static int video_disable_backlight_sysfs_if(
+ return 0;
+ }
+
+-static struct dmi_system_id video_dmi_table[] = {
++static const struct dmi_system_id video_dmi_table[] = {
+ /*
+ * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
+ */
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
-index 3b37676..898edfa 100644
+index 52dfd0d..8386baf 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
-@@ -63,11 +63,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
+@@ -70,11 +70,12 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
/* Legacy functions are optional, based upon ACPI_REDUCED_HARDWARE */
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
@@ -36289,7 +37145,7 @@ index 16129c7..8b675cd 100644
struct apei_exec_context {
u32 ip;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
-index e82d097..0c855c1 100644
+index 2bfd53c..391e9a4 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -478,7 +478,7 @@ static void __ghes_print_estatus(const char *pfx,
@@ -36328,7 +37184,7 @@ index a83e3c6..c3d617f 100644
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
if (!bgrt_kobj)
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
-index 1d17919..315e955 100644
+index 278dc4b..976433d 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -51,7 +51,7 @@ struct acpi_blacklist_item {
@@ -36340,9 +37196,9 @@ index 1d17919..315e955 100644
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
-@@ -163,7 +163,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
- return 0;
+@@ -172,7 +172,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
+ #endif
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
@@ -36387,10 +37243,10 @@ index c68e724..e863008 100644
/* parse the table header to get the table length */
if (count <= sizeof(struct acpi_table_header))
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
-index 8217e0b..3294cb6 100644
+index 88dbbb1..90714c0 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
-@@ -1026,6 +1026,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+@@ -1045,6 +1045,8 @@ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
#endif /* CONFIG_PM_SLEEP */
@@ -36399,7 +37255,7 @@ index 8217e0b..3294cb6 100644
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
-@@ -1042,6 +1044,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
+@@ -1061,6 +1063,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_resume_early,
#endif
},
@@ -36407,7 +37263,7 @@ index 8217e0b..3294cb6 100644
};
/**
-@@ -1111,7 +1114,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
+@@ -1130,7 +1133,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup(adev, ACPI_STATE_S0, false);
}
@@ -36416,10 +37272,10 @@ index 8217e0b..3294cb6 100644
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
-index 5e8fed4..d9bb545 100644
+index 9d4761d..ece2163 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
-@@ -1293,7 +1293,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
+@@ -1434,7 +1434,7 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
@@ -36455,7 +37311,7 @@ index d9f7158..168e742 100644
};
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
-index 39e0c8e..b5ae20c 100644
+index d540f42..d5b32ac 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -910,7 +910,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
@@ -36468,10 +37324,10 @@ index 39e0c8e..b5ae20c 100644
if (!pr->flags.power_setup_done)
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
-index e5dd808..1eceed1 100644
+index 7cfbda4..74f738c 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
-@@ -176,7 +176,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
+@@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
return 0;
}
@@ -36512,10 +37368,10 @@ index 0876d77b..3ba0127 100644
static void delete_gpe_attr_array(void)
{
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
-index d24fa19..782f1e6 100644
+index 6d4e44e..44fb839 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
-@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
+@@ -1212,7 +1212,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
return 0;
}
@@ -36524,21 +37380,40 @@ index d24fa19..782f1e6 100644
/*
* Award BIOS on this AOpen makes thermal control almost worthless.
* http://bugzilla.kernel.org/show_bug.cgi?id=8842
-diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
-index cc79d3f..28adb33 100644
---- a/drivers/acpi/video.c
-+++ b/drivers/acpi/video.c
-@@ -431,7 +431,7 @@ static int __init video_enable_native_backlight(const struct dmi_system_id *d)
- return 0;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 2922f1f..26b0c03 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -41,7 +41,6 @@ ACPI_MODULE_NAME("video");
+ void acpi_video_unregister_backlight(void);
+
+ static bool backlight_notifier_registered;
+-static struct notifier_block backlight_nb;
+ static struct work_struct backlight_notify_work;
+
+ static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
+@@ -284,6 +283,10 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
+ return NOTIFY_OK;
}
--static struct dmi_system_id video_dmi_table[] __initdata = {
-+static const struct dmi_system_id video_dmi_table[] __initconst = {
- /*
- * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
- */
++static const struct notifier_block backlight_nb = {
++ .notifier_call = acpi_video_backlight_notify,
++};
++
+ /*
+ * Determine which type of backlight interface to use on this system,
+ * First check cmdline, then dmi quirks, then do autodetect.
+@@ -314,8 +317,6 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
+ &video_caps, NULL);
+ INIT_WORK(&backlight_notify_work,
+ acpi_video_backlight_notify_work);
+- backlight_nb.notifier_call = acpi_video_backlight_notify;
+- backlight_nb.priority = 0;
+ if (backlight_register_notifier(&backlight_nb) == 0)
+ backlight_notifier_registered = true;
+ init_done = true;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
-index 287c4ba..6a600bc 100644
+index d256a66..4040556 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1252,7 +1252,7 @@ int ahci_kick_engine(struct ata_port *ap)
@@ -36551,7 +37426,7 @@ index 287c4ba..6a600bc 100644
unsigned long timeout_msec)
{
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
-index e0064d1..e53c75e 100644
+index 790e0de..6bae378 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -102,7 +102,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
@@ -36636,7 +37511,7 @@ index f840ca1..edd6ef3 100644
extern int libata_fua;
extern int libata_noacpi;
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
-index a9b0c82..207d97d 100644
+index 5d9ee99..8fa2585 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
@@ -36903,7 +37778,7 @@ index 75dde90..4309ead 100644
fore200e->tx_sat++;
DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
-index 93dca2e..c5daa69 100644
+index a8da3a5..67cf6c2 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1692,7 +1692,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
@@ -37168,7 +38043,7 @@ index 074616b..d6b3d5f 100644
}
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
-index 924f8e2..3375a3e 100644
+index 65e6590..df77d04 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
@@ -37645,10 +38520,10 @@ index cecfb94..87009ec 100644
}
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
-index 79bc203..fa3945b 100644
+index 5005924..9fc06c4 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
-@@ -1126,7 +1126,7 @@ int subsys_interface_register(struct subsys_interface *sif)
+@@ -1141,7 +1141,7 @@ int subsys_interface_register(struct subsys_interface *sif)
return -EINVAL;
mutex_lock(&subsys->p->mutex);
@@ -37657,7 +38532,7 @@ index 79bc203..fa3945b 100644
if (sif->add_dev) {
subsys_dev_iter_init(&iter, subsys, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
-@@ -1151,7 +1151,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
+@@ -1166,7 +1166,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
subsys = sif->subsys;
mutex_lock(&subsys->p->mutex);
@@ -37695,10 +38570,10 @@ index 68f0314..ca2a609 100644
while (1) {
spin_lock(&req_lock);
diff --git a/drivers/base/node.c b/drivers/base/node.c
-index a2aa65b..8831326 100644
+index 560751b..3a4847a 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
-@@ -613,7 +613,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
+@@ -627,7 +627,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
struct node_attr {
struct device_attribute attr;
enum node_states state;
@@ -37708,10 +38583,10 @@ index a2aa65b..8831326 100644
static ssize_t show_node_state(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
-index 2327613..211d7f5 100644
+index 0ee43c1..369dd62 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
-@@ -1725,7 +1725,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+@@ -1738,7 +1738,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
struct gpd_cpuidle_data *cpuidle_data;
@@ -37720,7 +38595,7 @@ index 2327613..211d7f5 100644
int ret = 0;
if (IS_ERR_OR_NULL(genpd) || state < 0)
-@@ -1793,7 +1793,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
+@@ -1806,7 +1806,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
struct gpd_cpuidle_data *cpuidle_data;
@@ -37729,7 +38604,7 @@ index 2327613..211d7f5 100644
int ret = 0;
if (IS_ERR_OR_NULL(genpd))
-@@ -2222,8 +2222,11 @@ int genpd_dev_pm_attach(struct device *dev)
+@@ -2241,8 +2241,11 @@ int genpd_dev_pm_attach(struct device *dev)
return ret;
}
@@ -37757,10 +38632,10 @@ index d2be3f9..0a3167a 100644
static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
-index 7726200..a417da7 100644
+index 51f15bc..892a668 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
-@@ -32,14 +32,14 @@ static bool pm_abort_suspend __read_mostly;
+@@ -33,14 +33,14 @@ static bool pm_abort_suspend __read_mostly;
* They need to be modified together atomically, so it's better to use one
* atomic variable to hold them both.
*/
@@ -37777,7 +38652,7 @@ index 7726200..a417da7 100644
*cnt = (comb >> IN_PROGRESS_BITS);
*inpr = comb & MAX_IN_PROGRESS;
-@@ -404,7 +404,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
+@@ -537,7 +537,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
ws->start_prevent_time = ws->last_time;
/* Increment the counter of events in progress. */
@@ -37786,7 +38661,7 @@ index 7726200..a417da7 100644
trace_wakeup_source_activate(ws->name, cec);
}
-@@ -530,7 +530,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
+@@ -663,7 +663,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
* Increment the counter of registered wakeup events and decrement the
* couter of wakeup events in progress simultaneously.
*/
@@ -37795,6 +38670,60 @@ index 7726200..a417da7 100644
trace_wakeup_source_deactivate(ws->name, cec);
split_counters(&cnt, &inpr);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 5799a0b..f7c7a7e 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -30,10 +30,9 @@ static LIST_HEAD(regmap_debugfs_early_list);
+ static DEFINE_MUTEX(regmap_debugfs_early_lock);
+
+ /* Calculate the length of a fixed format */
+-static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
++static size_t regmap_calc_reg_len(int max_val)
+ {
+- snprintf(buf, buf_size, "%x", max_val);
+- return strlen(buf);
++ return snprintf(NULL, 0, "%x", max_val);
+ }
+
+ static ssize_t regmap_name_read_file(struct file *file,
+@@ -174,8 +173,7 @@ static inline void regmap_calc_tot_len(struct regmap *map,
+ {
+ /* Calculate the length of a fixed format */
+ if (!map->debugfs_tot_len) {
+- map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
+- buf, count);
++ map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
+ map->debugfs_val_len = 2 * map->format.val_bytes;
+ map->debugfs_tot_len = map->debugfs_reg_len +
+ map->debugfs_val_len + 3; /* : \n */
+@@ -405,7 +403,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+ {
+- int reg_len, tot_len;
++ size_t reg_len, tot_len;
+ size_t buf_pos = 0;
+ loff_t p = 0;
+ ssize_t ret;
+@@ -421,7 +419,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ return -ENOMEM;
+
+ /* Calculate the length of a fixed format */
+- reg_len = regmap_calc_reg_len(map->max_register, buf, count);
++ reg_len = regmap_calc_reg_len(map->max_register);
+ tot_len = reg_len + 10; /* ': R W V P\n' */
+
+ for (i = 0; i <= map->max_register; i += map->reg_stride) {
+@@ -432,7 +430,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ /* If we're in the region the user is trying to read */
+ if (p >= *ppos) {
+ /* ...but not beyond it */
+- if (buf_pos >= count - 1 - tot_len)
++ if (buf_pos + tot_len + 1 >= count)
+ break;
+
+ /* Format the register */
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 8d98a32..61d3165 100644
--- a/drivers/base/syscore.c
@@ -37818,10 +38747,10 @@ index 8d98a32..61d3165 100644
}
EXPORT_SYMBOL_GPL(unregister_syscore_ops);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index ff20f19..018f1da 100644
+index 0422c47..b222c7a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
-@@ -3008,7 +3008,7 @@ static void start_io(ctlr_info_t *h)
+@@ -3024,7 +3024,7 @@ static void start_io(ctlr_info_t *h)
while (!list_empty(&h->reqQ)) {
c = list_entry(h->reqQ.next, CommandList_struct, list);
/* can't do anything if fifo is full */
@@ -37830,7 +38759,7 @@ index ff20f19..018f1da 100644
dev_warn(&h->pdev->dev, "fifo full\n");
break;
}
-@@ -3018,7 +3018,7 @@ static void start_io(ctlr_info_t *h)
+@@ -3034,7 +3034,7 @@ static void start_io(ctlr_info_t *h)
h->Qdepth--;
/* Tell the controller execute command */
@@ -37839,7 +38768,7 @@ index ff20f19..018f1da 100644
/* Put job onto the completed Q */
addQ(&h->cmpQ, c);
-@@ -3444,17 +3444,17 @@ startio:
+@@ -3460,17 +3460,17 @@ startio:
static inline unsigned long get_next_completion(ctlr_info_t *h)
{
@@ -37860,7 +38789,7 @@ index ff20f19..018f1da 100644
(h->interrupts_enabled == 0));
}
-@@ -3487,7 +3487,7 @@ static inline u32 next_command(ctlr_info_t *h)
+@@ -3503,7 +3503,7 @@ static inline u32 next_command(ctlr_info_t *h)
u32 a;
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
@@ -37869,7 +38798,7 @@ index ff20f19..018f1da 100644
if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-@@ -4044,7 +4044,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+@@ -4060,7 +4060,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
trans_support & CFGTBL_Trans_use_short_tags);
/* Change the access methods to the performant access methods */
@@ -37878,7 +38807,7 @@ index ff20f19..018f1da 100644
h->transMethod = CFGTBL_Trans_Performant;
return;
-@@ -4318,7 +4318,7 @@ static int cciss_pci_init(ctlr_info_t *h)
+@@ -4334,7 +4334,7 @@ static int cciss_pci_init(ctlr_info_t *h)
if (prod_index < 0)
return -ENODEV;
h->product_name = products[prod_index].product_name;
@@ -37887,7 +38816,7 @@ index ff20f19..018f1da 100644
if (cciss_board_disabled(h)) {
dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
-@@ -5050,7 +5050,7 @@ reinit_after_soft_reset:
+@@ -5065,7 +5065,7 @@ reinit_after_soft_reset:
}
/* make sure the board interrupts are off */
@@ -37896,7 +38825,7 @@ index ff20f19..018f1da 100644
rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
if (rc)
goto clean2;
-@@ -5100,7 +5100,7 @@ reinit_after_soft_reset:
+@@ -5115,7 +5115,7 @@ reinit_after_soft_reset:
* fake ones to scoop up any residual completions.
*/
spin_lock_irqsave(&h->lock, flags);
@@ -37905,7 +38834,7 @@ index ff20f19..018f1da 100644
spin_unlock_irqrestore(&h->lock, flags);
free_irq(h->intr[h->intr_mode], h);
rc = cciss_request_irq(h, cciss_msix_discard_completions,
-@@ -5120,9 +5120,9 @@ reinit_after_soft_reset:
+@@ -5135,9 +5135,9 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev, "Board READY.\n");
dev_info(&h->pdev->dev,
"Waiting for stale completions to drain.\n");
@@ -37917,7 +38846,7 @@ index ff20f19..018f1da 100644
rc = controller_reset_failed(h->cfgtable);
if (rc)
-@@ -5145,7 +5145,7 @@ reinit_after_soft_reset:
+@@ -5160,7 +5160,7 @@ reinit_after_soft_reset:
cciss_scsi_setup(h);
/* Turn the interrupts on so we can service requests */
@@ -37926,7 +38855,7 @@ index ff20f19..018f1da 100644
/* Get the firmware version */
inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
-@@ -5217,7 +5217,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
+@@ -5232,7 +5232,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
kfree(flush_buf);
if (return_code != IO_OK)
dev_warn(&h->pdev->dev, "Error flushing cache\n");
@@ -38138,10 +39067,10 @@ index 434c77d..6d3219a 100644
}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
-index b905e98..0812ed8 100644
+index efd19c2..6ccfa94 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
-@@ -385,7 +385,7 @@ struct drbd_epoch {
+@@ -386,7 +386,7 @@ struct drbd_epoch {
struct drbd_connection *connection;
struct list_head list;
unsigned int barrier_nr;
@@ -38150,7 +39079,7 @@ index b905e98..0812ed8 100644
atomic_t active; /* increased on every req. added, and dec on every finished. */
unsigned long flags;
};
-@@ -946,7 +946,7 @@ struct drbd_device {
+@@ -947,7 +947,7 @@ struct drbd_device {
unsigned int al_tr_number;
int al_tr_cycle;
wait_queue_head_t seq_wait;
@@ -38159,7 +39088,7 @@ index b905e98..0812ed8 100644
unsigned int peer_seq;
spinlock_t peer_seq_lock;
unsigned long comm_bm_set; /* communicated number of set bits. */
-@@ -955,8 +955,8 @@ struct drbd_device {
+@@ -956,8 +956,8 @@ struct drbd_device {
struct mutex own_state_mutex;
struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
char congestion_reason; /* Why we where congested... */
@@ -38171,7 +39100,7 @@ index b905e98..0812ed8 100644
int rs_last_events; /* counter of read or write "events" (unit sectors)
* on the lower level device when we last looked. */
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
-index 81fde9e..9948c05 100644
+index a151853..b9b5baa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1328,7 +1328,7 @@ static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet
@@ -38235,7 +39164,7 @@ index 74df8cf..e41fc24 100644
if (!msg)
goto failed;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
-index cee2035..22f66bd 100644
+index c097909..13688e1 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -870,7 +870,7 @@ int drbd_connected(struct drbd_peer_device *peer_device)
@@ -38421,10 +39350,10 @@ index d0fae55..4469096 100644
device->rs_last_events =
(int)part_stat_read(&disk->part0, sectors[0]) +
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
-index 09e628da..7607aaa 100644
+index 4c20c22..caef1eb 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
-@@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
+@@ -109,7 +109,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
@@ -38433,7 +39362,7 @@ index 09e628da..7607aaa 100644
}
/*
-@@ -1890,7 +1890,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+@@ -1891,7 +1891,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
return -EROFS;
}
pd->settings.fp = ti.fp;
@@ -38443,7 +39372,7 @@ index 09e628da..7607aaa 100644
if (ti.nwa_v) {
pd->nwa = be32_to_cpu(ti.next_writable);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index 010ce0b..7c0049e 100644
+index bc67a93..d552e86 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -64,7 +64,7 @@
@@ -38526,7 +39455,7 @@ index e5565fb..71be10b4 100644
+ .command_completed = smart1_completed,
};
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
-index 55c135b..9f8d60c 100644
+index 7a722df..54b76ab 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -288,7 +288,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
@@ -38603,7 +39532,7 @@ index 584bc31..e64a12c 100644
static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
-index a4af822..ed58cd1 100644
+index a043107..1263e4a 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -17,7 +17,8 @@ config DEVMEM
@@ -38659,6 +39588,21 @@ index 09f17eb..8531d2f 100644
sizeof(struct agp_segment) * reserve.seg_count)) {
kfree(segment);
return -EFAULT;
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index c6dea3f..72ae4b0 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -1408,8 +1408,8 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
+ }
+ EXPORT_SYMBOL(intel_gmch_probe);
+
+-void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+- phys_addr_t *mappable_base, unsigned long *mappable_end)
++void intel_gtt_get(uint64_t *gtt_total, uint64_t *stolen_size,
++ uint64_t *mappable_base, uint64_t *mappable_end)
+ {
+ *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+ *stolen_size = intel_private.stolen_size;
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
index 4f94375..413694e 100644
--- a/drivers/char/genrtc.c
@@ -38684,19 +39628,6 @@ index 5c0baa9..44011b1 100644
struct hpet_info *info)
{
struct hpet_timer __iomem *timer;
-diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
-index 3c1a123..a33c99f 100644
---- a/drivers/char/i8k.c
-+++ b/drivers/char/i8k.c
-@@ -790,7 +790,7 @@ static const struct i8k_config_data i8k_config_data[] = {
- },
- };
-
--static struct dmi_system_id i8k_dmi_table[] __initdata = {
-+static const struct dmi_system_id i8k_dmi_table[] __initconst = {
- {
- .ident = "Dell Inspiron",
- .matches = {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index bf75f63..359fa10 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
@@ -38928,7 +39859,7 @@ index 9df78e2..01ba9ae 100644
*ppos = i;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
-index 0ea9986..e7b07e4 100644
+index 7680d52..073f799e 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2345,7 +2345,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
@@ -39000,7 +39931,7 @@ index 0ea9986..e7b07e4 100644
if (cmd != SIOCWANDEV)
diff --git a/drivers/char/random.c b/drivers/char/random.c
-index 9cd6968..6416f00 100644
+index d0da5d8..739fd3a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -289,9 +289,6 @@
@@ -39013,7 +39944,7 @@ index 9cd6968..6416f00 100644
*/
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-@@ -439,9 +436,9 @@ struct entropy_store {
+@@ -442,9 +439,9 @@ struct entropy_store {
};
static void push_to_pool(struct work_struct *work);
@@ -39026,7 +39957,7 @@ index 9cd6968..6416f00 100644
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
-@@ -635,7 +632,7 @@ retry:
+@@ -654,7 +651,7 @@ retry:
/* The +2 corresponds to the /4 in the denominator */
do {
@@ -39035,7 +39966,7 @@ index 9cd6968..6416f00 100644
unsigned int add =
((pool_size - entropy_count)*anfrac*3) >> s;
-@@ -1207,7 +1204,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+@@ -1227,7 +1224,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
extract_buf(r, tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
@@ -39044,7 +39975,7 @@ index 9cd6968..6416f00 100644
ret = -EFAULT;
break;
}
-@@ -1590,7 +1587,7 @@ static char sysctl_bootid[16];
+@@ -1668,7 +1665,7 @@ static char sysctl_bootid[16];
static int proc_do_uuid(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -39053,7 +39984,7 @@ index 9cd6968..6416f00 100644
unsigned char buf[64], tmp_uuid[16], *uuid;
uuid = table->data;
-@@ -1620,7 +1617,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
+@@ -1698,7 +1695,7 @@ static int proc_do_uuid(struct ctl_table *table, int write,
static int proc_do_entropy(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -39164,7 +40095,7 @@ index 3a56a13..f8cbd25 100644
return 0;
}
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
-index 50754d20..9561cdc 100644
+index d2406fe..243951a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -685,7 +685,7 @@ static ssize_t fill_readbuf(struct port *port, char __user *out_buf,
@@ -39186,7 +40117,7 @@ index 50754d20..9561cdc 100644
static int wait_port_writable(struct port *port, bool nonblock)
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
-index 956b7e5..b655045 100644
+index 616f5ae..747bdd0 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -197,7 +197,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
@@ -39197,42 +40128,11 @@ index 956b7e5..b655045 100644
+ clk_ops_no_const *clk_composite_ops;
composite = kzalloc(sizeof(*composite), GFP_KERNEL);
- if (!composite) {
-diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
-index 69a094c..1fa2a8d 100644
---- a/drivers/clk/clk-mux.c
-+++ b/drivers/clk/clk-mux.c
-@@ -114,7 +114,7 @@ const struct clk_ops clk_mux_ro_ops = {
- EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
-
- struct clk *clk_register_mux_table(struct device *dev, const char *name,
-- const char **parent_names, u8 num_parents, unsigned long flags,
-+ const char * const *parent_names, u8 num_parents, unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock)
- {
-@@ -166,7 +166,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
- EXPORT_SYMBOL_GPL(clk_register_mux_table);
-
- struct clk *clk_register_mux(struct device *dev, const char *name,
-- const char **parent_names, u8 num_parents, unsigned long flags,
-+ const char * const *parent_names, u8 num_parents, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock)
- {
+ if (!composite)
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
-index e4c7538..9d5c18a 100644
+index b775fc2..2d45b64 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
-@@ -121,7 +121,7 @@ struct samsung_mux_clock {
- unsigned int id;
- const char *dev_name;
- const char *name;
-- const char **parent_names;
-+ const char * const *parent_names;
- u8 num_parents;
- unsigned long flags;
- unsigned long offset;
@@ -260,7 +260,7 @@ struct samsung_gate_clock {
#define GATE_DA(_id, dname, cname, pname, o, b, f, gf, a) \
__GATE(_id, dname, cname, pname, o, b, f, gf, a)
@@ -39243,7 +40143,7 @@ index e4c7538..9d5c18a 100644
/**
* struct samsung_clk_reg_dump: register dump of clock controller registers.
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
-index dd3a78c..386d49c 100644
+index 82449cd..dcfec30 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -22,6 +22,7 @@
@@ -39254,7 +40154,7 @@ index dd3a78c..386d49c 100644
#include "clk.h"
-@@ -174,7 +175,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
+@@ -170,7 +171,7 @@ static int socfpga_clk_prepare(struct clk_hw *hwclk)
return 0;
}
@@ -39263,7 +40163,7 @@ index dd3a78c..386d49c 100644
.prepare = socfpga_clk_prepare,
.recalc_rate = socfpga_clk_recalc_rate,
.get_parent = socfpga_clk_get_parent,
-@@ -208,8 +209,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
+@@ -203,8 +204,10 @@ static void __init __socfpga_gate_init(struct device_node *node,
socfpga_clk->hw.reg = clk_mgr_base_addr + clk_gate[0];
socfpga_clk->hw.bit_idx = clk_gate[1];
@@ -39277,7 +40177,7 @@ index dd3a78c..386d49c 100644
rc = of_property_read_u32(node, "fixed-divider", &fixed_div);
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
-index de6da95..c98278b 100644
+index 8f26b52..29f2a3a 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -21,6 +21,7 @@
@@ -39297,7 +40197,7 @@ index de6da95..c98278b 100644
.recalc_rate = clk_pll_recalc_rate,
.get_parent = clk_pll_get_parent,
};
-@@ -120,8 +121,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
+@@ -115,8 +116,10 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
pll_clk->hw.hw.init = &init;
pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
@@ -39311,7 +40211,7 @@ index de6da95..c98278b 100644
clk = clk_register(NULL, &pll_clk->hw.hw);
if (WARN_ON(IS_ERR(clk))) {
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
-index b0c18ed..1713a80 100644
+index 0136dfc..4cc55cb 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -675,8 +675,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -39328,7 +40228,7 @@ index b0c18ed..1713a80 100644
result = acpi_processor_register_performance(data->acpi_data, cpu);
if (result)
-@@ -809,7 +812,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+@@ -810,7 +813,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
@@ -39339,7 +40239,7 @@ index b0c18ed..1713a80 100644
break;
default:
break;
-@@ -903,8 +908,10 @@ static void __init acpi_cpufreq_boost_init(void)
+@@ -904,8 +909,10 @@ static void __init acpi_cpufreq_boost_init(void)
if (!msrs)
return;
@@ -39353,7 +40253,7 @@ index b0c18ed..1713a80 100644
cpu_notifier_register_begin();
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
-index bab67db..91af7e3 100644
+index 528a82bf..78dc025 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -392,7 +392,9 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
@@ -39368,11 +40268,11 @@ index bab67db..91af7e3 100644
ret = cpufreq_register_driver(&dt_cpufreq_driver);
if (ret)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
-index 8ae655c..3141442 100644
+index 7a3c30c..bac142e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
-@@ -2108,7 +2108,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
- }
+@@ -2197,7 +2197,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
+ read_unlock_irqrestore(&cpufreq_driver_lock, flags);
mutex_lock(&cpufreq_governor_mutex);
- list_del(&governor->governor_list);
@@ -39380,7 +40280,7 @@ index 8ae655c..3141442 100644
mutex_unlock(&cpufreq_governor_mutex);
return;
}
-@@ -2323,7 +2323,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
+@@ -2412,7 +2412,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -39389,7 +40289,7 @@ index 8ae655c..3141442 100644
.notifier_call = cpufreq_cpu_callback,
};
-@@ -2363,13 +2363,17 @@ int cpufreq_boost_trigger_state(int state)
+@@ -2452,13 +2452,17 @@ int cpufreq_boost_trigger_state(int state)
return 0;
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -39409,7 +40309,7 @@ index 8ae655c..3141442 100644
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_err("%s: Cannot %s BOOST\n",
-@@ -2434,16 +2438,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+@@ -2523,16 +2527,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -39437,63 +40337,45 @@ index 8ae655c..3141442 100644
ret = cpufreq_sysfs_create_file(&boost.attr);
if (ret) {
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
-index 1b44496..b80ff5e 100644
+index 57a39f8..feb9c73 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
-@@ -245,7 +245,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
- struct dbs_data *dbs_data;
- struct od_cpu_dbs_info_s *od_dbs_info = NULL;
- struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
-- struct od_ops *od_ops = NULL;
-+ const struct od_ops *od_ops = NULL;
- struct od_dbs_tuners *od_tuners = NULL;
- struct cs_dbs_tuners *cs_tuners = NULL;
- struct cpu_dbs_common_info *cpu_cdbs;
-@@ -311,7 +311,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-
- if ((cdata->governor == GOV_CONSERVATIVE) &&
- (!policy->governor->initialized)) {
-- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
- cpufreq_register_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-@@ -331,7 +331,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
-
- if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
- (policy->governor->initialized == 1)) {
-- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
-
- cpufreq_unregister_notifier(cs_ops->notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
+@@ -378,7 +378,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
+ cs_dbs_info->enable = 1;
+ cs_dbs_info->requested_freq = policy->cur;
+ } else {
+- struct od_ops *od_ops = cdata->gov_ops;
++ const struct od_ops *od_ops = cdata->gov_ops;
+ struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);
+
+ od_dbs_info->rate_mult = 1;
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
-index cc401d1..8197340 100644
+index 34736f5..da8cf4a 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -212,7 +212,7 @@ struct common_dbs_data {
- void (*exit)(struct dbs_data *dbs_data);
+ void (*exit)(struct dbs_data *dbs_data, bool notify);
/* Governor specific ops, see below */
- void *gov_ops;
+ const void *gov_ops;
- };
- /* Governor Per policy data */
-@@ -232,7 +232,7 @@ struct od_ops {
+ /*
+ * Protects governor's data (struct dbs_data and struct common_dbs_data)
+@@ -234,7 +234,7 @@ struct od_ops {
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation);
void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
-};
+} __no_const;
- struct cs_ops {
- struct notifier_block *notifier_block;
+ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
+ {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
-index ad3f38f..8f086cd 100644
+index 3c1e10f..02f17af 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
-@@ -524,7 +524,7 @@ static void od_exit(struct dbs_data *dbs_data)
+@@ -523,7 +523,7 @@ static void od_exit(struct dbs_data *dbs_data, bool notify)
define_get_cpu_dbs_routines(od_cpu_dbs_info);
@@ -39524,10 +40406,10 @@ index ad3f38f..8f086cd 100644
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index 6f9d27f..14385d1 100644
+index fcb929e..e628818 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
-@@ -134,10 +134,10 @@ struct pstate_funcs {
+@@ -137,10 +137,10 @@ struct pstate_funcs {
struct cpu_defaults {
struct pstate_adjust_policy pid_policy;
struct pstate_funcs funcs;
@@ -39540,7 +40422,7 @@ index 6f9d27f..14385d1 100644
static int hwp_active;
struct perf_limits {
-@@ -722,18 +722,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
+@@ -726,18 +726,18 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
cpu->pstate.current_pstate = pstate;
@@ -39563,10 +40445,10 @@ index 6f9d27f..14385d1 100644
- pstate_funcs.get_vid(cpu);
+ if (pstate_funcs->get_vid)
+ pstate_funcs->get_vid(cpu);
- intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
}
-@@ -1057,15 +1057,15 @@ static unsigned int force_load;
+@@ -1070,15 +1070,15 @@ static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
@@ -39586,7 +40468,7 @@ index 6f9d27f..14385d1 100644
{
pid_params.sample_rate_ms = policy->sample_rate_ms;
pid_params.p_gain_pct = policy->p_gain_pct;
-@@ -1077,12 +1077,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
+@@ -1090,12 +1090,7 @@ static void copy_pid_params(struct pstate_adjust_policy *policy)
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
@@ -39601,7 +40483,7 @@ index 6f9d27f..14385d1 100644
#if IS_ENABLED(CONFIG_ACPI)
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
-index 529cfd9..0e28fff 100644
+index 5dd95da..abc3837 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -134,10 +134,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
@@ -39773,6 +40655,19 @@ index 5db1478..e90e25e 100644
snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index a5c111b..1113002 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -21,7 +21,7 @@
+
+ #include "dt_idle_states.h"
+
+-static int init_state_node(struct cpuidle_state *idle_state,
++static int init_state_node(cpuidle_state_no_const *idle_state,
+ const struct of_device_id *matches,
+ struct device_node *state_node)
+ {
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index fb9f511..213e6cc 100644
--- a/drivers/cpuidle/governor.c
@@ -39888,7 +40783,7 @@ index 592af5f..bb1d583 100644
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
-index 112d63a..5443a61 100644
+index 33df7d9..0794989 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -154,7 +154,7 @@ static const char * const edac_caps[] = {
@@ -40134,6 +41029,33 @@ index 94a58a0..f5eba42 100644
#define to_dmi_dev_attr(_dev_attr) \
container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index ac1ce4a..321745e 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -690,14 +690,18 @@ static int __init dmi_init(void)
+ if (!dmi_table)
+ goto err_tables;
+
+- bin_attr_smbios_entry_point.size = smbios_entry_point_size;
+- bin_attr_smbios_entry_point.private = smbios_entry_point;
++ pax_open_kernel();
++ *(size_t *)&bin_attr_smbios_entry_point.size = smbios_entry_point_size;
++ *(void **)&bin_attr_smbios_entry_point.private = smbios_entry_point;
++ pax_close_kernel();
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
+ if (ret)
+ goto err_unmap;
+
+- bin_attr_DMI.size = dmi_len;
+- bin_attr_DMI.private = dmi_table;
++ pax_open_kernel();
++ *(size_t *)&bin_attr_DMI.size = dmi_len;
++ *(void **)&bin_attr_DMI.private = dmi_table;
++ pax_close_kernel();
+ ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
+ if (!ret)
+ return 0;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index d425374..1da1716 100644
--- a/drivers/firmware/efi/cper.c
@@ -40156,10 +41078,10 @@ index d425374..1da1716 100644
EXPORT_SYMBOL_GPL(cper_next_record_id);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
-index 63226e9..302716e 100644
+index d6144e3..23f9686 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -164,14 +164,16 @@ static struct attribute_group efi_subsys_attr_group = {
+@@ -170,14 +170,16 @@ static struct attribute_group efi_subsys_attr_group = {
};
static struct efivars generic_efivars;
@@ -40182,10 +41104,10 @@ index 63226e9..302716e 100644
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
-index 7b2e049..a253334 100644
+index 756eca8..2336d08 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
-@@ -589,7 +589,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+@@ -590,7 +590,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
static int
create_efivars_bin_attributes(void)
{
@@ -40246,7 +41168,7 @@ index 2f569aa..26e4f39 100644
}
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
-index cc016c61..d35279e 100644
+index 5de3ed2..d839c56 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -124,7 +124,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
@@ -40258,8 +41180,33 @@ index cc016c61..d35279e 100644
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
.default_attrs = def_attrs,
+diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
+index c246ac3..6867ca6 100644
+--- a/drivers/gpio/gpio-davinci.c
++++ b/drivers/gpio/gpio-davinci.c
+@@ -442,9 +442,9 @@ static struct irq_chip *davinci_gpio_get_irq_chip(unsigned int irq)
+ return &gpio_unbanked.chip;
+ };
+
+-static struct irq_chip *keystone_gpio_get_irq_chip(unsigned int irq)
++static irq_chip_no_const *keystone_gpio_get_irq_chip(unsigned int irq)
+ {
+- static struct irq_chip gpio_unbanked;
++ static irq_chip_no_const gpio_unbanked;
+
+ gpio_unbanked = *irq_get_chip(irq);
+ return &gpio_unbanked;
+@@ -474,7 +474,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
+ struct davinci_gpio_regs __iomem *g;
+ struct irq_domain *irq_domain = NULL;
+ const struct of_device_id *match;
+- struct irq_chip *irq_chip;
++ irq_chip_no_const *irq_chip;
+ gpio_get_irq_chip_cb_t gpio_get_irq_chip;
+
+ /*
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
-index 3cfcfc6..09d6f117 100644
+index fbf2873..0a37114 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -278,7 +278,7 @@ static int em_gio_probe(struct platform_device *pdev)
@@ -40285,10 +41232,10 @@ index 4ba7ed5..1536b5d 100644
static struct {
spinlock_t lock;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
-index b232397..ce8c4dc 100644
+index 61a731f..d5ca6cb 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
-@@ -1054,7 +1054,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
+@@ -1067,7 +1067,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
dev_err(bank->dev, "Could not get gpio dbck\n");
}
@@ -40297,7 +41244,7 @@ index b232397..ce8c4dc 100644
{
static int gpio;
int irq_base = 0;
-@@ -1137,7 +1137,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
+@@ -1150,7 +1150,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
const struct omap_gpio_platform_data *pdata;
struct resource *res;
struct gpio_bank *bank;
@@ -40333,10 +41280,10 @@ index c1caa45..f0f97d2 100644
return -EINVAL;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
-index 6bc612b..3932464 100644
+index bf4bd1d..51154a3 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
-@@ -558,8 +558,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
+@@ -569,8 +569,10 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
if (gpiochip->irqchip) {
@@ -40349,7 +41296,7 @@ index 6bc612b..3932464 100644
gpiochip->irqchip = NULL;
}
}
-@@ -625,8 +627,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+@@ -636,8 +638,11 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
gpiochip->irqchip = NULL;
return -EINVAL;
}
@@ -40363,19 +41310,391 @@ index 6bc612b..3932464 100644
/*
* Prepare the mapping since the irqchip shall be orthogonal to
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 99f158e..20b6c4c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -1071,7 +1071,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
+ * locking inversion with the driver load path. And the access here is
+ * completely racy anyway. So don't bother with locking for now.
+ */
+- return dev->open_count == 0;
++ return local_read(&dev->open_count) == 0;
+ }
+
+ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index c991973..8eb176b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -419,7 +419,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
+ (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ ? cache_policy_coherent : cache_policy_noncoherent;
+
+- if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
++ if (!dev->dqm->ops->set_cache_memory_policy(dev->dqm,
+ &pdd->qpd,
+ default_policy,
+ alternate_policy,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+index 75312c8..e3684e6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+@@ -293,7 +293,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ goto device_queue_manager_error;
+ }
+
+- if (kfd->dqm->ops.start(kfd->dqm) != 0) {
++ if (kfd->dqm->ops->start(kfd->dqm) != 0) {
+ dev_err(kfd_device,
+ "Error starting queuen manager for device (%x:%x)\n",
+ kfd->pdev->vendor, kfd->pdev->device);
+@@ -349,7 +349,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
+ BUG_ON(kfd == NULL);
+
+ if (kfd->init_complete) {
+- kfd->dqm->ops.stop(kfd->dqm);
++ kfd->dqm->ops->stop(kfd->dqm);
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, NULL);
+ amd_iommu_free_device(kfd->pdev);
+@@ -372,7 +372,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
+ amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
+ iommu_pasid_shutdown_callback);
+ amd_iommu_set_invalid_ppr_cb(kfd->pdev, iommu_invalid_ppr_cb);
+- kfd->dqm->ops.start(kfd->dqm);
++ kfd->dqm->ops->start(kfd->dqm);
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 4bb7f42..320fcac 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -242,7 +242,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
+
+ BUG_ON(!dqm || !q || !qpd);
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (mqd == NULL)
+ return -ENOMEM;
+
+@@ -288,14 +288,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
+ mutex_lock(&dqm->lock);
+
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+ }
+ deallocate_hqd(dqm, q);
+ } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++ mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (mqd == NULL) {
+ retval = -ENOMEM;
+ goto out;
+@@ -347,7 +347,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+ BUG_ON(!dqm || !q || !q->mqd);
+
+ mutex_lock(&dqm->lock);
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops->get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (mqd == NULL) {
+ mutex_unlock(&dqm->lock);
+@@ -414,7 +414,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
+ mutex_lock(&dqm->lock);
+ list_add(&n->list, &dqm->queues);
+
+- retval = dqm->ops_asic_specific.register_process(dqm, qpd);
++ retval = dqm->ops_asic_specific->register_process(dqm, qpd);
+
+ dqm->processes_count++;
+
+@@ -502,7 +502,7 @@ int init_pipelines(struct device_queue_manager *dqm,
+
+ memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
++ mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (mqd == NULL) {
+ kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
+ return -ENOMEM;
+@@ -635,7 +635,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ struct mqd_manager *mqd;
+ int retval;
+
+- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
++ mqd = dqm->ops->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd)
+ return -ENOMEM;
+
+@@ -650,7 +650,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
+ pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
+ pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
+
+- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
++ dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd);
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0) {
+@@ -712,7 +712,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
+ dqm->queue_count = dqm->processes_count = 0;
+ dqm->sdma_queue_count = 0;
+ dqm->active_runlist = false;
+- retval = dqm->ops_asic_specific.initialize(dqm);
++ retval = dqm->ops_asic_specific->initialize(dqm);
+ if (retval != 0)
+ goto fail_init_pipelines;
+
+@@ -879,7 +879,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ select_sdma_engine_id(q);
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops->get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+
+ if (mqd == NULL) {
+@@ -887,7 +887,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
+ return -ENOMEM;
+ }
+
+- dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
++ dqm->ops_asic_specific->init_sdma_vm(dqm, q, qpd);
+ retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ &q->gart_mqd_addr, &q->properties);
+ if (retval != 0)
+@@ -1060,7 +1060,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+
+ }
+
+- mqd = dqm->ops.get_mqd_manager(dqm,
++ mqd = dqm->ops->get_mqd_manager(dqm,
+ get_mqd_type_from_queue_type(q->properties.type));
+ if (!mqd) {
+ retval = -ENOMEM;
+@@ -1149,7 +1149,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
+ qpd->sh_mem_ape1_limit = limit >> 16;
+ }
+
+- retval = dqm->ops_asic_specific.set_cache_memory_policy(
++ retval = dqm->ops_asic_specific->set_cache_memory_policy(
+ dqm,
+ qpd,
+ default_policy,
+@@ -1172,6 +1172,36 @@ out:
+ return false;
+ }
+
++static const struct device_queue_manager_ops cp_dqm_ops = {
++ .create_queue = create_queue_cpsch,
++ .initialize = initialize_cpsch,
++ .start = start_cpsch,
++ .stop = stop_cpsch,
++ .destroy_queue = destroy_queue_cpsch,
++ .update_queue = update_queue,
++ .get_mqd_manager = get_mqd_manager_nocpsch,
++ .register_process = register_process_nocpsch,
++ .unregister_process = unregister_process_nocpsch,
++ .uninitialize = uninitialize_nocpsch,
++ .create_kernel_queue = create_kernel_queue_cpsch,
++ .destroy_kernel_queue = destroy_kernel_queue_cpsch,
++ .set_cache_memory_policy = set_cache_memory_policy,
++};
++
++static const struct device_queue_manager_ops no_cp_dqm_ops = {
++ .start = start_nocpsch,
++ .stop = stop_nocpsch,
++ .create_queue = create_queue_nocpsch,
++ .destroy_queue = destroy_queue_nocpsch,
++ .update_queue = update_queue,
++ .get_mqd_manager = get_mqd_manager_nocpsch,
++ .register_process = register_process_nocpsch,
++ .unregister_process = unregister_process_nocpsch,
++ .initialize = initialize_nocpsch,
++ .uninitialize = uninitialize_nocpsch,
++ .set_cache_memory_policy = set_cache_memory_policy,
++};
++
+ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ {
+ struct device_queue_manager *dqm;
+@@ -1189,33 +1219,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+ case KFD_SCHED_POLICY_HWS:
+ case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
+ /* initialize dqm for cp scheduling */
+- dqm->ops.create_queue = create_queue_cpsch;
+- dqm->ops.initialize = initialize_cpsch;
+- dqm->ops.start = start_cpsch;
+- dqm->ops.stop = stop_cpsch;
+- dqm->ops.destroy_queue = destroy_queue_cpsch;
+- dqm->ops.update_queue = update_queue;
+- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+- dqm->ops.register_process = register_process_nocpsch;
+- dqm->ops.unregister_process = unregister_process_nocpsch;
+- dqm->ops.uninitialize = uninitialize_nocpsch;
+- dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+- dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+- dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
++ dqm->ops = &cp_dqm_ops;
+ break;
+ case KFD_SCHED_POLICY_NO_HWS:
+ /* initialize dqm for no cp scheduling */
+- dqm->ops.start = start_nocpsch;
+- dqm->ops.stop = stop_nocpsch;
+- dqm->ops.create_queue = create_queue_nocpsch;
+- dqm->ops.destroy_queue = destroy_queue_nocpsch;
+- dqm->ops.update_queue = update_queue;
+- dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+- dqm->ops.register_process = register_process_nocpsch;
+- dqm->ops.unregister_process = unregister_process_nocpsch;
+- dqm->ops.initialize = initialize_nocpsch;
+- dqm->ops.uninitialize = uninitialize_nocpsch;
+- dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
++ dqm->ops = &no_cp_dqm_ops;
+ break;
+ default:
+ BUG();
+@@ -1224,15 +1232,15 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
+
+ switch (dev->device_info->asic_family) {
+ case CHIP_CARRIZO:
+- device_queue_manager_init_vi(&dqm->ops_asic_specific);
++ device_queue_manager_init_vi(dqm);
+ break;
+
+ case CHIP_KAVERI:
+- device_queue_manager_init_cik(&dqm->ops_asic_specific);
++ device_queue_manager_init_cik(dqm);
+ break;
+ }
+
+- if (dqm->ops.initialize(dqm) != 0) {
++ if (dqm->ops->initialize(dqm) != 0) {
+ kfree(dqm);
+ return NULL;
+ }
+@@ -1244,6 +1252,6 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
+ {
+ BUG_ON(!dqm);
+
+- dqm->ops.uninitialize(dqm);
++ dqm->ops->uninitialize(dqm);
+ kfree(dqm);
+ }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
-index 488f51d..301d462 100644
+index ec4036a..3ef0646 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
-@@ -118,7 +118,7 @@ struct device_queue_manager_ops {
- enum cache_policy alternate_policy,
- void __user *alternate_aperture_base,
- uint64_t alternate_aperture_size);
--};
-+} __no_const;
+@@ -154,8 +154,8 @@ struct device_queue_manager_asic_ops {
+ */
- /**
- * struct device_queue_manager
+ struct device_queue_manager {
+- struct device_queue_manager_ops ops;
+- struct device_queue_manager_asic_ops ops_asic_specific;
++ struct device_queue_manager_ops *ops;
++ struct device_queue_manager_asic_ops *ops_asic_specific;
+
+ struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct packet_manager packets;
+@@ -178,8 +178,8 @@ struct device_queue_manager {
+ bool active_runlist;
+ };
+
+-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
+-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
++void device_queue_manager_init_cik(struct device_queue_manager *dqm);
++void device_queue_manager_init_vi(struct device_queue_manager *dqm);
+ void program_sh_mem_settings(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd);
+ int init_pipelines(struct device_queue_manager *dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+index 9ce8a20..1ca4e22 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+@@ -36,12 +36,16 @@ static int initialize_cpsch_cik(struct device_queue_manager *dqm);
+ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops)
++static const struct device_queue_manager_asic_ops cik_dqm_asic_ops = {
++ .set_cache_memory_policy = set_cache_memory_policy_cik,
++ .register_process = register_process_cik,
++ .initialize = initialize_cpsch_cik,
++ .init_sdma_vm = init_sdma_vm,
++};
++
++void device_queue_manager_init_cik(struct device_queue_manager *dqm)
+ {
+- ops->set_cache_memory_policy = set_cache_memory_policy_cik;
+- ops->register_process = register_process_cik;
+- ops->initialize = initialize_cpsch_cik;
+- ops->init_sdma_vm = init_sdma_vm;
++ dqm->ops_asic_specific = &cik_dqm_asic_ops;
+ }
+
+ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+index 4c15212..61bfab8 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+@@ -35,14 +35,18 @@ static int initialize_cpsch_vi(struct device_queue_manager *dqm);
+ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
+ struct qcm_process_device *qpd);
+
+-void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops)
++static const struct device_queue_manager_asic_ops vi_dqm_asic_ops = {
++ .set_cache_memory_policy = set_cache_memory_policy_vi,
++ .register_process = register_process_vi,
++ .initialize = initialize_cpsch_vi,
++ .init_sdma_vm = init_sdma_vm,
++};
++
++void device_queue_manager_init_vi(struct device_queue_manager *dqm)
+ {
+ pr_warn("amdkfd: VI DQM is not currently supported\n");
+
+- ops->set_cache_memory_policy = set_cache_memory_policy_vi;
+- ops->register_process = register_process_vi;
+- ops->initialize = initialize_cpsch_vi;
+- ops->init_sdma_vm = init_sdma_vm;
++ dqm->ops_asic_specific = &vi_dqm_asic_ops;
+ }
+
+ static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+index 7f134aa..cd34d4a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+@@ -50,8 +50,8 @@ static void interrupt_wq(struct work_struct *);
+
+ int kfd_interrupt_init(struct kfd_dev *kfd)
+ {
+- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
+- kfd->device_info->ih_ring_entry_size,
++ void *interrupt_ring = kmalloc_array(kfd->device_info->ih_ring_entry_size,
++ KFD_INTERRUPT_RING_SIZE,
+ GFP_KERNEL);
+ if (!interrupt_ring)
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 8fa8941..5ae07df 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -56,7 +56,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+ switch (type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ case KFD_QUEUE_TYPE_HIQ:
+- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
++ kq->mqd = dev->dqm->ops->get_mqd_manager(dev->dqm,
+ KFD_MQD_TYPE_HIQ);
+ break;
+ default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 5940531..a75b0e5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -40389,6 +41708,80 @@ index 5940531..a75b0e5 100644
struct kernel_queue {
struct kernel_queue_ops ops;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 7b69070..d7bd78b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -194,7 +194,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+
+ if (list_empty(&pqm->queues)) {
+ pdd->qpd.pqm = pqm;
+- dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
++ dev->dqm->ops->register_process(dev->dqm, &pdd->qpd);
+ }
+
+ pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
+@@ -220,7 +220,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
++ retval = dev->dqm->ops->create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
+ print_queue(q);
+@@ -234,7 +234,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ kq->queue->properties.queue_id = *qid;
+ pqn->kq = kq;
+ pqn->q = NULL;
+- retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
++ retval = dev->dqm->ops->create_kernel_queue(dev->dqm,
+ kq, &pdd->qpd);
+ break;
+ default:
+@@ -265,7 +265,7 @@ err_allocate_pqn:
+ /* check if queues list is empty unregister process from device */
+ clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+- dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
++ dev->dqm->ops->unregister_process(dev->dqm, &pdd->qpd);
+ return retval;
+ }
+
+@@ -306,13 +306,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ if (pqn->kq) {
+ /* destroy kernel queue (DIQ) */
+ dqm = pqn->kq->dev->dqm;
+- dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
++ dqm->ops->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+ kernel_queue_uninit(pqn->kq);
+ }
+
+ if (pqn->q) {
+ dqm = pqn->q->device->dqm;
+- retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
++ retval = dqm->ops->destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval != 0)
+ return retval;
+
+@@ -324,7 +324,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ clear_bit(qid, pqm->queue_slot_bitmap);
+
+ if (list_empty(&pqm->queues))
+- dqm->ops.unregister_process(dqm, &pdd->qpd);
++ dqm->ops->unregister_process(dqm, &pdd->qpd);
+
+ return retval;
+ }
+@@ -349,7 +349,7 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
+ pqn->q->properties.queue_percent = p->queue_percent;
+ pqn->q->properties.priority = p->priority;
+
+- retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
++ retval = pqn->q->device->dqm->ops->update_queue(pqn->q->device->dqm,
+ pqn->q);
+ if (retval != 0)
+ return retval;
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 9b23525..65f4110 100644
--- a/drivers/gpu/drm/drm_context.c
@@ -40541,10 +41934,10 @@ index 9b23525..65f4110 100644
dev->driver->context_dtor(dev, ctx->handle);
drm_legacy_ctxbitmap_free(dev, ctx->handle);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
-index 800a025..c88f1a4 100644
+index fed7483..5bc0335 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
-@@ -4179,7 +4179,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
+@@ -4174,7 +4174,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
goto done;
}
@@ -40554,10 +41947,10 @@ index 800a025..c88f1a4 100644
ret = -EFAULT;
goto done;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
-index 48f7359..8c3b594 100644
+index b7bf4ce..585cf3b 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
-@@ -448,7 +448,7 @@ void drm_unplug_dev(struct drm_device *dev)
+@@ -434,7 +434,7 @@ void drm_unplug_dev(struct drm_device *dev)
drm_device_set_unplugged(dev);
@@ -40566,7 +41959,7 @@ index 48f7359..8c3b594 100644
drm_put_dev(dev);
}
mutex_unlock(&drm_global_mutex);
-@@ -596,10 +596,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+@@ -582,10 +582,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
if (drm_ht_create(&dev->map_hash, 12))
goto err_minors;
@@ -40585,7 +41978,7 @@ index 48f7359..8c3b594 100644
if (drm_core_check_feature(dev, DRIVER_GEM)) {
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
-index 076dd60..e4a4ba7 100644
+index c59ce4d..056d413 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -89,7 +89,7 @@ int drm_open(struct inode *inode, struct file *filp)
@@ -40606,7 +41999,7 @@ index 076dd60..e4a4ba7 100644
drm_minor_release(minor);
return retcode;
}
-@@ -376,7 +376,7 @@ int drm_release(struct inode *inode, struct file *filp)
+@@ -377,7 +377,7 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&drm_global_mutex);
@@ -40615,7 +42008,7 @@ index 076dd60..e4a4ba7 100644
mutex_lock(&dev->struct_mutex);
list_del(&file_priv->lhead);
-@@ -389,10 +389,10 @@ int drm_release(struct inode *inode, struct file *filp)
+@@ -392,10 +392,10 @@ int drm_release(struct inode *inode, struct file *filp)
* Begin inline drm_release
*/
@@ -40626,8 +42019,8 @@ index 076dd60..e4a4ba7 100644
- dev->open_count);
+ local_read(&dev->open_count));
- /* Release any auth tokens that might point to this file_priv,
- (do that under the drm_global_mutex) */
+ /* if the master has gone away we can't do anything with the lock */
+ if (file_priv->minor->master)
@@ -465,7 +465,7 @@ int drm_release(struct inode *inode, struct file *filp)
* End inline drm_release
*/
@@ -40782,10 +42175,10 @@ index 9cfcd0a..7142a7f 100644
ret = drm_ioctl(filp, cmd, arg);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
-index 266dcd6..d0194d9 100644
+index b1d303f..c59012c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
-@@ -663,7 +663,7 @@ long drm_ioctl(struct file *filp,
+@@ -650,7 +650,7 @@ long drm_ioctl(struct file *filp,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
const struct drm_ioctl_desc *ioctl = NULL;
@@ -40875,8 +42268,21 @@ index 93ec5dc..82acbaf 100644
int front_offset;
} drm_i810_private_t;
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 82bbe3f..ce004bf 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -480,7 +480,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+- seq_printf(m, "%zu [%lu] gtt total\n",
++ seq_printf(m, "%llu [%llu] gtt total\n",
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
-index 68e0c85..3303192 100644
+index d2df321..f746478 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -162,6 +162,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
@@ -40898,10 +42304,10 @@ index 68e0c85..3303192 100644
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-index a3190e79..86b06cb 100644
+index 5e6b4a2..6ba2c85 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -936,12 +936,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+@@ -935,12 +935,12 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
static int
validate_exec_list(struct drm_device *dev,
struct drm_i915_gem_exec_object2 *exec,
@@ -40916,6 +42322,134 @@ index a3190e79..86b06cb 100644
invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
if (USES_FULL_PPGTT(dev))
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 31e8269..7055934 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -2360,10 +2360,10 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+ }
+
+ static int gen8_gmch_probe(struct drm_device *dev,
+- size_t *gtt_total,
+- size_t *stolen,
+- phys_addr_t *mappable_base,
+- unsigned long *mappable_end)
++ uint64_t *gtt_total,
++ uint64_t *stolen,
++ uint64_t *mappable_base,
++ uint64_t *mappable_end)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+@@ -2408,10 +2408,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
+ }
+
+ static int gen6_gmch_probe(struct drm_device *dev,
+- size_t *gtt_total,
+- size_t *stolen,
+- phys_addr_t *mappable_base,
+- unsigned long *mappable_end)
++ uint64_t *gtt_total,
++ uint64_t *stolen,
++ uint64_t *mappable_base,
++ uint64_t *mappable_end)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int gtt_size;
+@@ -2425,7 +2425,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
+ * a coarse sanity check.
+ */
+ if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
+- DRM_ERROR("Unknown GMADR size (%lx)\n",
++ DRM_ERROR("Unknown GMADR size (%llx)\n",
+ dev_priv->gtt.mappable_end);
+ return -ENXIO;
+ }
+@@ -2459,10 +2459,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
+ }
+
+ static int i915_gmch_probe(struct drm_device *dev,
+- size_t *gtt_total,
+- size_t *stolen,
+- phys_addr_t *mappable_base,
+- unsigned long *mappable_end)
++ uint64_t *gtt_total,
++ uint64_t *stolen,
++ uint64_t *mappable_base,
++ uint64_t *mappable_end)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+@@ -2527,10 +2527,10 @@ int i915_gem_gtt_init(struct drm_device *dev)
+ gtt->base.dev = dev;
+
+ /* GMADR is the PCI mmio aperture into the global GTT. */
+- DRM_INFO("Memory usable by graphics device = %zdM\n",
++ DRM_INFO("Memory usable by graphics device = %lldM\n",
+ gtt->base.total >> 20);
+- DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
++ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
++ DRM_DEBUG_DRIVER("GTT stolen size = %lldM\n", gtt->stolen_size >> 20);
+ #ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped)
+ DRM_INFO("VT-d active for gfx access\n");
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
+index 0d46dd2..1171c00 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
+@@ -233,8 +233,8 @@ struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+- unsigned long start; /* Start offset always 0 for dri2 */
+- size_t total; /* size addr space maps (ex. 2GB for ggtt) */
++ uint64_t start; /* Start offset always 0 for dri2 */
++ uint64_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+@@ -300,11 +300,11 @@ struct i915_address_space {
+ */
+ struct i915_gtt {
+ struct i915_address_space base;
+- size_t stolen_size; /* Total size of stolen memory */
++ uint64_t stolen_size; /* Total size of stolen memory */
+
+- unsigned long mappable_end; /* End offset that we can CPU map */
++ uint64_t mappable_end; /* End offset that we can CPU map */
+ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
+- phys_addr_t mappable_base; /* PA of our GMADR */
++ uint64_t mappable_base; /* PA of our GMADR */
+
+ /** "Graphics Stolen Memory" holds the global PTEs */
+ void __iomem *gsm;
+@@ -314,9 +314,9 @@ struct i915_gtt {
+ int mtrr;
+
+ /* global gtt ops */
+- int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
+- size_t *stolen, phys_addr_t *mappable_base,
+- unsigned long *mappable_end);
++ int (*gtt_probe)(struct drm_device *dev, uint64_t *gtt_total,
++ uint64_t *stolen, uint64_t *mappable_base,
++ uint64_t *mappable_end);
+ };
+
+ struct i915_hw_ppgtt {
+diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
+index 8b5b784..78711f6 100644
+--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
++++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
+@@ -310,7 +310,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
+ if (dev_priv->mm.stolen_base == 0)
+ return 0;
+
+- DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
++ DRM_DEBUG_KMS("found %lld bytes of stolen memory at %08lx\n",
+ dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
+
+ if (INTEL_INFO(dev)->gen >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 23aa04c..1d25960 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
@@ -40975,10 +42509,10 @@ index 23aa04c..1d25960 100644
ret = drm_ioctl(filp, cmd, arg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
-index 57c8878..8ef38a7 100644
+index 107c6c0..e1926b0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -13617,13 +13617,13 @@ struct intel_quirk {
+@@ -14501,13 +14501,13 @@ struct intel_quirk {
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
@@ -40994,7 +42528,7 @@ index 57c8878..8ef38a7 100644
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
-@@ -13631,18 +13631,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+@@ -14515,18 +14515,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
return 1;
}
@@ -41141,10 +42675,10 @@ index 0190b69..60c3eaf 100644
#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
-index cd6dae0..f25eb48 100644
+index 477cbb1..109b826 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
-@@ -943,7 +943,8 @@ static struct drm_driver
+@@ -946,7 +946,8 @@ static struct drm_driver
driver_stub = {
.driver_features =
DRIVER_USE_AGP |
@@ -41180,7 +42714,7 @@ index 462679a..88e32a7 100644
if (nr < DRM_COMMAND_BASE)
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
-index 18f4497..10f6025 100644
+index 7464aef3..c63ae4f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -130,11 +130,11 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
@@ -41200,7 +42734,7 @@ index 18f4497..10f6025 100644
};
static int
-@@ -198,11 +198,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+@@ -207,11 +207,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
@@ -41217,7 +42751,7 @@ index 18f4497..10f6025 100644
};
/*XXX*/
-@@ -271,11 +271,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+@@ -280,11 +280,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
}
const struct ttm_mem_type_manager_func nv04_gart_manager = {
@@ -41247,8 +42781,21 @@ index c7592ec..dd45ebc 100644
}
static const struct vga_switcheroo_client_ops
+diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
+index 778372b..4b81cb4 100644
+--- a/drivers/gpu/drm/omapdrm/Makefile
++++ b/drivers/gpu/drm/omapdrm/Makefile
+@@ -3,7 +3,7 @@
+ # Direct Rendering Infrastructure (DRI)
+ #
+
+-ccflags-y := -Iinclude/drm -Werror
++ccflags-y := -Iinclude/drm
+ omapdrm-y := omap_drv.o \
+ omap_irq.o \
+ omap_debugfs.o \
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
-index f33251d..22f6cb1 100644
+index fdc1833..f307630 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
@@ -41305,7 +42852,7 @@ index 6911b8c..89d6867 100644
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
-index 7c6cafe..460f542 100644
+index 01a8694..584fb48 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -290,10 +290,10 @@ struct qxl_device {
@@ -41324,7 +42871,7 @@ index 7c6cafe..460f542 100644
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
-index 7354a4c..f37d7f9 100644
+index bda5c5f..140ac46 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -183,7 +183,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
@@ -41345,7 +42892,7 @@ index 7354a4c..f37d7f9 100644
sizeof(reloc))) {
ret = -EFAULT;
goto out_free_bos;
-@@ -296,10 +296,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+@@ -282,10 +282,10 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
@@ -41595,10 +43142,10 @@ index b928c17..e5d9400 100644
if (regcomp
(&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
-index 604c44d..6eb6c4b 100644
+index d8319da..d6e066f 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
-@@ -1247,7 +1247,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+@@ -1253,7 +1253,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
@@ -41709,7 +43256,7 @@ index 15aee72..cda326e 100644
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
-index edafd3c..3af7c9c 100644
+index 06ac59fe..57e0681 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -961,7 +961,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
@@ -41789,6 +43336,21 @@ index 7591d89..463e2b6 100644
err = drm_debugfs_create_files(sor->debugfs_files,
ARRAY_SIZE(debugfs_files),
+diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
+index deeca48..54e1b6c 100644
+--- a/drivers/gpu/drm/tilcdc/Makefile
++++ b/drivers/gpu/drm/tilcdc/Makefile
+@@ -1,7 +1,7 @@
+ ccflags-y := -Iinclude/drm
+-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+- ccflags-y += -Werror
+-endif
++#ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
++# ccflags-y += -Werror
++#endif
+
+ obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
+ tilcdc_slave_compat.dtb.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index aa0bd054..aea6a01 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -41909,7 +43471,7 @@ index 025c429..314062f 100644
/* set zero flag for page allocation if required */
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
-index 01e1d27..aaa018a 100644
+index 624d941..106fa1f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -56,7 +56,7 @@
@@ -41921,7 +43483,7 @@ index 01e1d27..aaa018a 100644
/* times are in msecs */
#define IS_UNDEFINED (0)
#define IS_WC (1<<1)
-@@ -413,7 +413,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+@@ -416,7 +416,7 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
* @nr_free: If set to true will free all pages in pool
* @use_static: Safe to use static buffer
**/
@@ -41930,7 +43492,7 @@ index 01e1d27..aaa018a 100644
bool use_static)
{
static struct page *static_buf[NUM_PAGES_TO_ALLOC];
-@@ -421,8 +421,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+@@ -424,8 +424,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
struct dma_page *dma_p, *tmp;
struct page **pages_to_free;
struct list_head d_pages;
@@ -41940,7 +43502,7 @@ index 01e1d27..aaa018a 100644
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
-@@ -499,7 +498,8 @@ restart:
+@@ -502,7 +501,8 @@ restart:
/* remove range of pages from the pool */
if (freed_pages) {
ttm_pool_update_free_locked(pool, freed_pages);
@@ -41950,7 +43512,7 @@ index 01e1d27..aaa018a 100644
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
-@@ -936,7 +936,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+@@ -939,7 +939,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
struct dma_page *d_page, *next;
enum pool_type type;
bool is_cached = false;
@@ -41959,7 +43521,7 @@ index 01e1d27..aaa018a 100644
unsigned long irq_flags;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
-@@ -1012,7 +1012,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+@@ -1014,7 +1014,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
static unsigned start_pool;
unsigned idx = 0;
unsigned pool_offset;
@@ -41968,7 +43530,7 @@ index 01e1d27..aaa018a 100644
struct device_pools *p;
unsigned long freed = 0;
-@@ -1025,7 +1025,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+@@ -1027,7 +1027,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
goto out;
pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
@@ -41977,7 +43539,7 @@ index 01e1d27..aaa018a 100644
if (!p->dev)
continue;
-@@ -1039,7 +1039,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+@@ -1041,7 +1041,7 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
freed += nr_free - shrink_pages;
@@ -42094,6 +43656,53 @@ index 1319433..a993b0c 100644
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+index db8b491..d87b27c 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
++++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+@@ -34,7 +34,7 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+- seq_printf(m, "fence %ld %lld\n",
++ seq_printf(m, "fence %lld %lld\n",
+ atomic64_read(&vgdev->fence_drv.last_seq),
+ vgdev->fence_drv.sync_seq);
+ return 0;
+diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
+index 1da6326..98dd385 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
++++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
+@@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size)
+ {
+ struct virtio_gpu_fence *fence = to_virtio_fence(f);
+
+- snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq));
++ snprintf(str, size, "%llu", atomic64_read(&fence->drv->last_seq));
+ }
+
+ static const struct fence_ops virtio_fence_ops = {
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
+index b092d7b..3bbecd9 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
+@@ -197,11 +197,11 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+ }
+
+ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
+- ttm_bo_man_init,
+- ttm_bo_man_takedown,
+- ttm_bo_man_get_node,
+- ttm_bo_man_put_node,
+- ttm_bo_man_debug
++ .init = &ttm_bo_man_init,
++ .takedown = &ttm_bo_man_takedown,
++ .get_node = &ttm_bo_man_get_node,
++ .put_node = &ttm_bo_man_put_node,
++ .debug = &ttm_bo_man_debug
+ };
+
+ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d26a6da..5fa41ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -42246,10 +43855,10 @@ index 37ac7b5..d52a5c9 100644
/* copy over all the bus versions */
if (dev->bus && dev->bus->pm) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
-index 722a925..594c312 100644
+index e6fce23..85949a0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
-@@ -2552,7 +2552,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
+@@ -2550,7 +2550,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
int hid_add_device(struct hid_device *hdev)
{
@@ -42258,7 +43867,7 @@ index 722a925..594c312 100644
int ret;
if (WARN_ON(hdev->status & HID_STAT_ADDED))
-@@ -2595,7 +2595,7 @@ int hid_add_device(struct hid_device *hdev)
+@@ -2593,7 +2593,7 @@ int hid_add_device(struct hid_device *hdev)
/* XXX hack, any other cleaner solution after the driver core
* is converted to allow more than 20 bytes as the device name? */
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
@@ -42268,18 +43877,9 @@ index 722a925..594c312 100644
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
-index 5614fee..8301fbf 100644
+index 5614fee..8a6f5f6 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
-@@ -34,7 +34,7 @@ struct hid_sensor_custom_field {
- int report_id;
- char group_name[HID_CUSTOM_NAME_LENGTH];
- struct hid_sensor_hub_attribute_info attribute;
-- struct device_attribute sd_attrs[HID_CUSTOM_MAX_CORE_ATTRS];
-+ device_attribute_no_const sd_attrs[HID_CUSTOM_MAX_CORE_ATTRS];
- char attr_name[HID_CUSTOM_TOTAL_ATTRS][HID_CUSTOM_NAME_LENGTH];
- struct attribute *attrs[HID_CUSTOM_TOTAL_ATTRS];
- struct attribute_group hid_custom_attribute_group;
@@ -590,7 +590,7 @@ static int hid_sensor_custom_add_attributes(struct hid_sensor_custom
j = 0;
while (j < HID_CUSTOM_TOTAL_ATTRS &&
@@ -42303,10 +43903,10 @@ index c13fb5b..55a3802 100644
*off += size;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
-index 54da66d..aa3a3d7 100644
+index 603ce97..7f27468 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
-@@ -373,7 +373,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
+@@ -382,7 +382,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
int ret = 0;
next_gpadl_handle =
@@ -42316,7 +43916,7 @@ index 54da66d..aa3a3d7 100644
ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
if (ret)
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
-index d3943bc..3de28a9 100644
+index d3943bc..597fd1e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -118,7 +118,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
@@ -42324,7 +43924,7 @@ index d3943bc..3de28a9 100644
u32 output_address_hi = output_address >> 32;
u32 output_address_lo = output_address & 0xFFFFFFFF;
- void *hypercall_page = hv_context.hypercall_page;
-+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
++ void *hypercall_page = (void *)ktva_ktla((unsigned long)hv_context.hypercall_page);
__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
"=a"(hv_status_lo) : "d" (control_hi),
@@ -42338,7 +43938,7 @@ index d3943bc..3de28a9 100644
if (!virtaddr)
goto cleanup;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
-index cb5b7dc..6052f22 100644
+index 8a725cd..91abaf0 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -469,7 +469,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
@@ -42350,7 +43950,7 @@ index cb5b7dc..6052f22 100644
static int dm_ring_size = (5 * PAGE_SIZE);
-@@ -941,7 +941,7 @@ static void hot_add_req(struct work_struct *dummy)
+@@ -943,7 +943,7 @@ static void hot_add_req(struct work_struct *dummy)
pr_info("Memory hot add failed\n");
dm->state = DM_INITIALIZED;
@@ -42359,7 +43959,7 @@ index cb5b7dc..6052f22 100644
vmbus_sendpacket(dm->dev->channel, &resp,
sizeof(struct dm_hot_add_response),
(unsigned long)NULL,
-@@ -1022,7 +1022,7 @@ static void post_status(struct hv_dynmem_device *dm)
+@@ -1024,7 +1024,7 @@ static void post_status(struct hv_dynmem_device *dm)
memset(&status, 0, sizeof(struct dm_status));
status.hdr.type = DM_STATUS_REPORT;
status.hdr.size = sizeof(struct dm_status);
@@ -42368,7 +43968,7 @@ index cb5b7dc..6052f22 100644
/*
* The host expects the guest to report free and committed memory.
-@@ -1046,7 +1046,7 @@ static void post_status(struct hv_dynmem_device *dm)
+@@ -1048,7 +1048,7 @@ static void post_status(struct hv_dynmem_device *dm)
* send the status. This can happen if we were interrupted
* after we picked our transaction ID.
*/
@@ -42377,7 +43977,7 @@ index cb5b7dc..6052f22 100644
return;
/*
-@@ -1191,7 +1191,7 @@ static void balloon_up(struct work_struct *dummy)
+@@ -1193,7 +1193,7 @@ static void balloon_up(struct work_struct *dummy)
*/
do {
@@ -42386,7 +43986,7 @@ index cb5b7dc..6052f22 100644
ret = vmbus_sendpacket(dm_device.dev->channel,
bl_resp,
bl_resp->hdr.size,
-@@ -1237,7 +1237,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
+@@ -1239,7 +1239,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
memset(&resp, 0, sizeof(struct dm_unballoon_response));
resp.hdr.type = DM_UNBALLOON_RESPONSE;
@@ -42395,7 +43995,7 @@ index cb5b7dc..6052f22 100644
resp.hdr.size = sizeof(struct dm_unballoon_response);
vmbus_sendpacket(dm_device.dev->channel, &resp,
-@@ -1298,7 +1298,7 @@ static void version_resp(struct hv_dynmem_device *dm,
+@@ -1300,7 +1300,7 @@ static void version_resp(struct hv_dynmem_device *dm,
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
@@ -42404,7 +44004,7 @@ index cb5b7dc..6052f22 100644
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
version_req.is_last_attempt = 1;
-@@ -1471,7 +1471,7 @@ static int balloon_probe(struct hv_device *dev,
+@@ -1473,7 +1473,7 @@ static int balloon_probe(struct hv_device *dev,
memset(&version_req, 0, sizeof(struct dm_version_request));
version_req.hdr.type = DM_VERSION_REQUEST;
version_req.hdr.size = sizeof(struct dm_version_request);
@@ -42413,7 +44013,7 @@ index cb5b7dc..6052f22 100644
version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
version_req.is_last_attempt = 0;
-@@ -1502,7 +1502,7 @@ static int balloon_probe(struct hv_device *dev,
+@@ -1504,7 +1504,7 @@ static int balloon_probe(struct hv_device *dev,
memset(&cap_msg, 0, sizeof(struct dm_capabilities));
cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
cap_msg.hdr.size = sizeof(struct dm_capabilities);
@@ -42423,7 +44023,7 @@ index cb5b7dc..6052f22 100644
cap_msg.caps.cap_bits.balloon = 1;
cap_msg.caps.cap_bits.hot_add = 1;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
-index 887287a..238a626 100644
+index cddc0c9..2eb587d 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -645,7 +645,7 @@ enum vmbus_connect_state {
@@ -42433,8 +44033,8 @@ index 887287a..238a626 100644
- atomic_t next_gpadl_handle;
+ atomic_unchecked_t next_gpadl_handle;
+ struct completion unload_event;
/*
- * Represents channel interrupts. Each bit position represents a
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 579bdf9..0dac21d5 100644
--- a/drivers/hwmon/acpi_power_meter.c
@@ -42508,10 +44108,10 @@ index cccef87..06ce8ec 100644
{
sysfs_attr_init(&attr->attr);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
-index ed303ba..e24bd26f 100644
+index 3e03379..ec521d3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
-@@ -782,7 +782,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
+@@ -783,7 +783,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
@@ -42520,6 +44120,19 @@ index ed303ba..e24bd26f 100644
.notifier_call = coretemp_cpu_callback,
};
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index c848789..e9e9217 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -819,7 +819,7 @@ static const struct i8k_config_data i8k_config_data[] = {
+ },
+ };
+
+-static struct dmi_system_id i8k_dmi_table[] __initdata = {
++static const struct dmi_system_id i8k_dmi_table[] __initconst = {
+ {
+ .ident = "Dell Inspiron",
+ .matches = {
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 7a8a6fb..015c1fd 100644
--- a/drivers/hwmon/ibmaem.c
@@ -42633,7 +44246,7 @@ index f2e47c7..45d7941 100644
label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
if (!label)
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
-index d4f0935..7420593 100644
+index 497a7f8..3fffedf 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -169,7 +169,7 @@ struct sht15_data {
@@ -42750,7 +44363,7 @@ index 71c7a39..71dd3e0 100644
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
-index 0b510ba..4fbb5085 100644
+index 64a6b82..a524354 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
@@ -42762,11 +44375,24 @@ index 0b510ba..4fbb5085 100644
drive->dma = 0;
}
}
+diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
+index 56b9708..980b63b 100644
+--- a/drivers/ide/ide-disk.c
++++ b/drivers/ide/ide-disk.c
+@@ -178,7 +178,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ * 1073741822 == 549756 MB or 48bit addressing fake drive
+ */
+
+-static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
++static ide_startstop_t __intentional_overflow(-1) ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
+ {
+ ide_hwif_t *hwif = drive->hwif;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
-index 4df97f6..c751151 100644
+index 3524b0d..8c14520 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
-@@ -570,7 +570,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
+@@ -576,7 +576,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
}
static
@@ -42775,8 +44401,21 @@ index 4df97f6..c751151 100644
const char *postfix,
struct iio_chan_spec const *chan,
ssize_t (*readfunc)(struct device *dev,
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index b13936d..65322b2 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -776,7 +776,7 @@ static int ak8975_probe(struct i2c_client *client,
+ name = id->name;
+ } else if (ACPI_HANDLE(&client->dev))
+ name = ak8975_match_acpi_device(&client->dev, &chipset);
+- else
++ if (!name)
+ return -ENOSYS;
+
+ if (chipset >= AK_MAX_TYPE) {
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
-index 0271608..81998c5 100644
+index 3a972eb..4126183 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -115,7 +115,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
@@ -42788,7 +44427,7 @@ index 0271608..81998c5 100644
};
struct cm_counter_attribute {
-@@ -1397,7 +1397,7 @@ static void cm_dup_req_handler(struct cm_work *work,
+@@ -1411,7 +1411,7 @@ static void cm_dup_req_handler(struct cm_work *work,
struct ib_mad_send_buf *msg = NULL;
int ret;
@@ -42797,7 +44436,7 @@ index 0271608..81998c5 100644
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
-@@ -1784,7 +1784,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
+@@ -1798,7 +1798,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
if (!cm_id_priv)
return;
@@ -42806,7 +44445,7 @@ index 0271608..81998c5 100644
counter[CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
-@@ -1951,7 +1951,7 @@ static int cm_rtu_handler(struct cm_work *work)
+@@ -1965,7 +1965,7 @@ static int cm_rtu_handler(struct cm_work *work)
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
@@ -42815,7 +44454,7 @@ index 0271608..81998c5 100644
counter[CM_RTU_COUNTER]);
goto out;
}
-@@ -2134,7 +2134,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2148,7 +2148,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
dreq_msg->local_comm_id);
if (!cm_id_priv) {
@@ -42824,7 +44463,7 @@ index 0271608..81998c5 100644
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
return -EINVAL;
-@@ -2159,7 +2159,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2173,7 +2173,7 @@ static int cm_dreq_handler(struct cm_work *work)
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
@@ -42833,7 +44472,7 @@ index 0271608..81998c5 100644
counter[CM_DREQ_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
-@@ -2173,7 +2173,7 @@ static int cm_dreq_handler(struct cm_work *work)
+@@ -2187,7 +2187,7 @@ static int cm_dreq_handler(struct cm_work *work)
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
@@ -42842,7 +44481,7 @@ index 0271608..81998c5 100644
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
-@@ -2540,7 +2540,7 @@ static int cm_mra_handler(struct cm_work *work)
+@@ -2554,7 +2554,7 @@ static int cm_mra_handler(struct cm_work *work)
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout)) {
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
@@ -42851,7 +44490,7 @@ index 0271608..81998c5 100644
counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
goto out;
-@@ -2549,7 +2549,7 @@ static int cm_mra_handler(struct cm_work *work)
+@@ -2563,7 +2563,7 @@ static int cm_mra_handler(struct cm_work *work)
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
@@ -42860,7 +44499,7 @@ index 0271608..81998c5 100644
counter[CM_MRA_COUNTER]);
/* fall through */
default:
-@@ -2711,7 +2711,7 @@ static int cm_lap_handler(struct cm_work *work)
+@@ -2725,7 +2725,7 @@ static int cm_lap_handler(struct cm_work *work)
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
@@ -42869,7 +44508,7 @@ index 0271608..81998c5 100644
counter[CM_LAP_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
-@@ -2727,7 +2727,7 @@ static int cm_lap_handler(struct cm_work *work)
+@@ -2741,7 +2741,7 @@ static int cm_lap_handler(struct cm_work *work)
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
@@ -42878,7 +44517,7 @@ index 0271608..81998c5 100644
counter[CM_LAP_COUNTER]);
goto unlock;
default:
-@@ -3011,7 +3011,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
+@@ -3025,7 +3025,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
@@ -42887,7 +44526,7 @@ index 0271608..81998c5 100644
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
-@@ -3223,10 +3223,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
+@@ -3237,10 +3237,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
msg->retries = 1;
@@ -42900,7 +44539,7 @@ index 0271608..81998c5 100644
&port->counter_group[CM_XMIT_RETRIES].
counter[attr_index]);
-@@ -3436,7 +3436,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
+@@ -3466,7 +3466,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
}
attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
@@ -42909,7 +44548,7 @@ index 0271608..81998c5 100644
counter[attr_id - CM_ATTR_ID_OFFSET]);
work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
-@@ -3667,7 +3667,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
+@@ -3709,7 +3709,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
cm_attr = container_of(attr, struct cm_counter_attribute, attr);
return sprintf(buf, "%ld\n",
@@ -42990,7 +44629,7 @@ index 9f5ad7c..588cd84 100644
}
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
-index a9f0489..27a161b 100644
+index a6ca83b..bd3a726 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -951,6 +951,9 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
@@ -43080,10 +44719,10 @@ index 1f95bba..9530f87 100644
sdata, wqe->wr.wr.atomic.swap);
goto send_comp;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
-index 9cd2b00..7486df4 100644
+index 68b3dfa..3e0c511 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
-@@ -106,7 +106,7 @@ __be64 mlx4_ib_gen_node_guid(void)
+@@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
@@ -43093,10 +44732,10 @@ index 9cd2b00..7486df4 100644
}
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
-index ed327e6..ca1739e0 100644
+index a0559a8..86a2320 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
-@@ -1041,7 +1041,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
+@@ -1042,7 +1042,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
char name[20];
@@ -43106,10 +44745,10 @@ index ed327e6..ca1739e0 100644
ctx->mcg_wq = create_singlethread_workqueue(name);
if (!ctx->mcg_wq)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
-index fce39343..9d8fdff 100644
+index 334387f..e640d74 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
-@@ -435,7 +435,7 @@ struct mlx4_ib_demux_ctx {
+@@ -436,7 +436,7 @@ struct mlx4_ib_demux_ctx {
struct list_head mcg_mgid0_list;
struct workqueue_struct *mcg_wq;
struct mlx4_ib_demux_pv_ctx **tun;
@@ -43119,7 +44758,7 @@ index fce39343..9d8fdff 100644
};
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
-index 9d3e5c1..6f166df 100644
+index c7f49bb..6a021bb 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
@@ -43155,8 +44794,8 @@ index 9d3e5c1..6f166df 100644
-int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
+int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad)
+ int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const void *in_mad, void *response_mad)
{
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ded76c1..0cf0a08 100644
@@ -43203,10 +44842,10 @@ index ed9a989..6aa5dc2 100644
int list_len, u64 iova, u64 total_size,
u32 access, struct mthca_mr *mr)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
-index 415f8e1..e34214e 100644
+index 93ae51d..84c4a44 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
-@@ -764,7 +764,7 @@ unlock:
+@@ -771,7 +771,7 @@ unlock:
return 0;
}
@@ -43297,7 +44936,7 @@ index bd9d132..70d84f4 100644
extern u32 int_mod_timer_init;
extern u32 int_mod_cq_depth_256;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
-index 72b4341..2600332 100644
+index 8a3ad17..e1ed4bc 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -69,14 +69,14 @@ u32 cm_packets_dropped;
@@ -43378,7 +45017,7 @@ index 72b4341..2600332 100644
} else {
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
-@@ -1667,7 +1667,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+@@ -1670,7 +1670,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
cm_node->rem_mac);
add_hte_node(cm_core, cm_node);
@@ -43387,7 +45026,7 @@ index 72b4341..2600332 100644
return cm_node;
}
-@@ -1728,7 +1728,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+@@ -1731,7 +1731,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
}
atomic_dec(&cm_core->node_cnt);
@@ -43396,7 +45035,7 @@ index 72b4341..2600332 100644
nesqp = cm_node->nesqp;
if (nesqp) {
nesqp->cm_node = NULL;
-@@ -1792,7 +1792,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
+@@ -1795,7 +1795,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
static void drop_packet(struct sk_buff *skb)
{
@@ -43405,7 +45044,7 @@ index 72b4341..2600332 100644
dev_kfree_skb_any(skb);
}
-@@ -1855,7 +1855,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
+@@ -1858,7 +1858,7