Diffstat (limited to '4.9.24')
-rw-r--r--  4.9.24/0000_README                                        56
-rw-r--r--  4.9.24/1023_linux-4.9.24.patch                          3261
-rw-r--r--  4.9.24/4420_grsecurity-3.1-4.9.24-201704220732.patch  225996
-rw-r--r--  4.9.24/4425_grsec_remove_EI_PAX.patch                      19
-rw-r--r--  4.9.24/4426_default_XATTR_PAX_FLAGS.patch                  36
-rw-r--r--  4.9.24/4427_force_XATTR_PAX_tmpfs.patch                    48
-rw-r--r--  4.9.24/4430_grsec-remove-localversion-grsec.patch           9
-rw-r--r--  4.9.24/4435_grsec-mute-warnings.patch                      43
-rw-r--r--  4.9.24/4440_grsec-remove-protected-paths.patch             20
-rw-r--r--  4.9.24/4450_grsec-kconfig-default-gids.patch              111
-rw-r--r--  4.9.24/4465_selinux-avc_audit-log-curr_ip.patch            73
-rw-r--r--  4.9.24/4470_disable-compat_vdso.patch                      58
-rw-r--r--  4.9.24/4475_emutramp_default_on.patch                      34
13 files changed, 229764 insertions, 0 deletions
diff --git a/4.9.24/0000_README b/4.9.24/0000_README
new file mode 100644
index 0000000..89862a9
--- /dev/null
+++ b/4.9.24/0000_README
@@ -0,0 +1,56 @@
+README
+-----------------------------------------------------------------------------
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch: 1023_linux-4.9.24.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.24
+
+Patch: 4420_grsecurity-3.1-4.9.24-201704220732.patch
+From: http://www.grsecurity.net
+Desc: hardened-sources base patch from upstream grsecurity
+
+Patch: 4425_grsec_remove_EI_PAX.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Remove EI_PAX option and force off
+
+Patch: 4426_default_XATTR_PAX_FLAGS.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Default PT_PAX_FLAGS off and XATTR_PAX_FLAGS on
+
+Patch: 4427_force_XATTR_PAX_tmpfs.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Force XATTR_PAX on tmpfs
+
+Patch: 4430_grsec-remove-localversion-grsec.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Removes grsecurity's localversion-grsec file
+
+Patch: 4435_grsec-mute-warnings.patch
+From: Alexander Gabert <gaberta@fh-trier.de>
+ Gordon Malm <gengor@gentoo.org>
+Desc: Removes verbose compile warning settings from grsecurity, restores
+ mainline Linux kernel behavior
+
+Patch: 4440_grsec-remove-protected-paths.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Removes chmod statements from grsecurity/Makefile
+
+Patch: 4450_grsec-kconfig-default-gids.patch
+From: Kerin Millar <kerframil@gmail.com>
+Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
+ features
+
+Patch: 4465_selinux-avc_audit-log-curr_ip.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Anthony G. Basile <blueness@gentoo.org>
+Desc: Configurable option to add src IP address to SELinux log messages
+
+Patch: 4470_disable-compat_vdso.patch
+From: Gordon Malm <gengor@gentoo.org>
+ Kerin Millar <kerframil@gmail.com>
+Desc: Disables VDSO_COMPAT operation completely
+
+Patch: 4475_emutramp_default_on.patch
+From: Anthony G. Basile <blueness@gentoo.org>
+Desc: Set PAX_EMUTRAMP default on for libffi, bugs #329499 and #457194
diff --git a/4.9.24/1023_linux-4.9.24.patch b/4.9.24/1023_linux-4.9.24.patch
new file mode 100644
index 0000000..0ffd31b
--- /dev/null
+++ b/4.9.24/1023_linux-4.9.24.patch
@@ -0,0 +1,3261 @@
+diff --git a/Makefile b/Makefile
+index 0de7597..50436f5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index 0ddf369..8ac0e59 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -269,11 +269,6 @@ static void ltq_hw5_irqdispatch(void)
+ DEFINE_HWx_IRQDISPATCH(5)
+ #endif
+
+-static void ltq_hw_irq_handler(struct irq_desc *desc)
+-{
+- ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
+-}
+-
+ #ifdef CONFIG_MIPS_MT_SMP
+ void __init arch_init_ipiirq(int irq, struct irqaction *action)
+ {
+@@ -318,19 +313,23 @@ static struct irqaction irq_call = {
+ asmlinkage void plat_irq_dispatch(void)
+ {
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+- int irq;
+-
+- if (!pending) {
+- spurious_interrupt();
+- return;
++ unsigned int i;
++
++ if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
++ do_IRQ(MIPS_CPU_TIMER_IRQ);
++ goto out;
++ } else {
++ for (i = 0; i < MAX_IM; i++) {
++ if (pending & (CAUSEF_IP2 << i)) {
++ ltq_hw_irqdispatch(i);
++ goto out;
++ }
++ }
+ }
++ pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
+
+- pending >>= CAUSEB_IP;
+- while (pending) {
+- irq = fls(pending) - 1;
+- do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+- pending &= ~BIT(irq);
+- }
++out:
++ return;
+ }
+
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+@@ -355,6 +354,11 @@ static const struct irq_domain_ops irq_domain_ops = {
+ .map = icu_map,
+ };
+
++static struct irqaction cascade = {
++ .handler = no_action,
++ .name = "cascade",
++};
++
+ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ {
+ struct device_node *eiu_node;
+@@ -386,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ mips_cpu_irq_init();
+
+ for (i = 0; i < MAX_IM; i++)
+- irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
++ setup_irq(i + 2, &cascade);
+
+ if (cpu_has_vint) {
+ pr_info("Setting up vectored interrupts\n");
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 7fcf512..0497cec 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -42,10 +42,10 @@ static inline long access_ok(int type, const void __user * addr,
+ #define get_user __get_user
+
+ #if !defined(CONFIG_64BIT)
+-#define LDD_USER(ptr) __get_user_asm64(ptr)
++#define LDD_USER(val, ptr) __get_user_asm64(val, ptr)
+ #define STD_USER(x, ptr) __put_user_asm64(x, ptr)
+ #else
+-#define LDD_USER(ptr) __get_user_asm("ldd", ptr)
++#define LDD_USER(val, ptr) __get_user_asm(val, "ldd", ptr)
+ #define STD_USER(x, ptr) __put_user_asm("std", x, ptr)
+ #endif
+
+@@ -100,63 +100,87 @@ struct exception_data {
+ " mtsp %0,%%sr2\n\t" \
+ : : "r"(get_fs()) : )
+
+-#define __get_user(x, ptr) \
+-({ \
+- register long __gu_err __asm__ ("r8") = 0; \
+- register long __gu_val; \
+- \
+- load_sr2(); \
+- switch (sizeof(*(ptr))) { \
+- case 1: __get_user_asm("ldb", ptr); break; \
+- case 2: __get_user_asm("ldh", ptr); break; \
+- case 4: __get_user_asm("ldw", ptr); break; \
+- case 8: LDD_USER(ptr); break; \
+- default: BUILD_BUG(); break; \
+- } \
+- \
+- (x) = (__force __typeof__(*(ptr))) __gu_val; \
+- __gu_err; \
++#define __get_user_internal(val, ptr) \
++({ \
++ register long __gu_err __asm__ ("r8") = 0; \
++ \
++ switch (sizeof(*(ptr))) { \
++ case 1: __get_user_asm(val, "ldb", ptr); break; \
++ case 2: __get_user_asm(val, "ldh", ptr); break; \
++ case 4: __get_user_asm(val, "ldw", ptr); break; \
++ case 8: LDD_USER(val, ptr); break; \
++ default: BUILD_BUG(); \
++ } \
++ \
++ __gu_err; \
+ })
+
+-#define __get_user_asm(ldx, ptr) \
++#define __get_user(val, ptr) \
++({ \
++ load_sr2(); \
++ __get_user_internal(val, ptr); \
++})
++
++#define __get_user_asm(val, ldx, ptr) \
++{ \
++ register long __gu_val; \
++ \
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ : "=r"(__gu_val), "=r"(__gu_err) \
+- : "r"(ptr), "1"(__gu_err));
++ : "r"(ptr), "1"(__gu_err)); \
++ \
++ (val) = (__force __typeof__(*(ptr))) __gu_val; \
++}
+
+ #if !defined(CONFIG_64BIT)
+
+-#define __get_user_asm64(ptr) \
++#define __get_user_asm64(val, ptr) \
++{ \
++ union { \
++ unsigned long long l; \
++ __typeof__(*(ptr)) t; \
++ } __gu_tmp; \
++ \
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
+- : "=r"(__gu_val), "=r"(__gu_err) \
+- : "r"(ptr), "1"(__gu_err));
++ : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
++ : "r"(ptr), "1"(__gu_err)); \
++ \
++ (val) = __gu_tmp.t; \
++}
+
+ #endif /* !defined(CONFIG_64BIT) */
+
+
+-#define __put_user(x, ptr) \
++#define __put_user_internal(x, ptr) \
+ ({ \
+ register long __pu_err __asm__ ("r8") = 0; \
+ __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
+ \
+- load_sr2(); \
+ switch (sizeof(*(ptr))) { \
+- case 1: __put_user_asm("stb", __x, ptr); break; \
+- case 2: __put_user_asm("sth", __x, ptr); break; \
+- case 4: __put_user_asm("stw", __x, ptr); break; \
+- case 8: STD_USER(__x, ptr); break; \
+- default: BUILD_BUG(); break; \
+- } \
++ case 1: __put_user_asm("stb", __x, ptr); break; \
++ case 2: __put_user_asm("sth", __x, ptr); break; \
++ case 4: __put_user_asm("stw", __x, ptr); break; \
++ case 8: STD_USER(__x, ptr); break; \
++ default: BUILD_BUG(); \
++ } \
+ \
+ __pu_err; \
+ })
+
++#define __put_user(x, ptr) \
++({ \
++ load_sr2(); \
++ __put_user_internal(x, ptr); \
++})
++
++
+ /*
+ * The "__put_user/kernel_asm()" macros tell gcc they read from memory
+ * instead of writing. This is because they do not write to any memory
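Note on the hunk above: the one-character change from "=r" to "=&r" is the substantive fix. The asm writes its output register in its first instruction (copy %%r0,%R0) while the pointer input is still needed by the two ldw instructions that follow, so without an earlyclobber the compiler may place the output and the input in the same register. A stand-alone sketch of the same constraint rule, using x86-64 inline asm purely for illustration (not taken from the patch):

    #include <stdio.h>

    /* The asm writes the output before its last read of the input, so
     * the output must be earlyclobber ("=&r"); with plain "=r" the
     * compiler could legally give 'out' and 'in' the same register and
     * the first instruction would clobber the input. */
    static long double_it(long in)
    {
            long out;

            asm("movq $0, %0\n\t"   /* write output first...    */
                "addq %1, %0\n\t"   /* ...then still read input */
                "addq %1, %0"
                : "=&r"(out)
                : "r"(in));
            return out;
    }

    int main(void)
    {
            printf("%ld\n", double_it(21));   /* prints 42 */
            return 0;
    }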
+diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
+index f01188c..85c28bb 100644
+--- a/arch/parisc/lib/lusercopy.S
++++ b/arch/parisc/lib/lusercopy.S
+@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
+ add dst,len,end
+
+ /* short copy with less than 16 bytes? */
+- cmpib,>>=,n 15,len,.Lbyte_loop
++ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+ /* same alignment? */
+ xor src,dst,t0
+@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
+ /* loop until we are 64-bit aligned */
+ .Lalign_loop64:
+ extru dst,31,3,t1
+- cmpib,=,n 0,t1,.Lcopy_loop_16
++ cmpib,=,n 0,t1,.Lcopy_loop_16_start
+ 20: ldb,ma 1(srcspc,src),t1
+ 21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop64
+@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
+ ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
++.Lcopy_loop_16_start:
+ ldi 31,t0
+ .Lcopy_loop_16:
+ cmpb,COND(>>=),n t0,len,.Lword_loop
+@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
+ /* loop until we are 32-bit aligned */
+ .Lalign_loop32:
+ extru dst,31,2,t1
+- cmpib,=,n 0,t1,.Lcopy_loop_4
++ cmpib,=,n 0,t1,.Lcopy_loop_8
+ 20: ldb,ma 1(srcspc,src),t1
+ 21: stb,ma t1,1(dstspc,dst)
+ b .Lalign_loop32
+@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
+ ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+-.Lcopy_loop_4:
++.Lcopy_loop_8:
+ cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+ 10: ldw 0(srcspc,src),t1
+@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
+ ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+ ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+- b .Lcopy_loop_4
++ b .Lcopy_loop_8
+ ldo -16(len),len
+
+ .Lbyte_loop:
+@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
+ .Lunaligned_copy:
+ /* align until dst is 32bit-word-aligned */
+ extru dst,31,2,t1
+- cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
++ cmpib,=,n 0,t1,.Lcopy_dstaligned
+ 20: ldb 0(srcspc,src),t1
+ ldo 1(src),src
+ 21: stb,ma t1,1(dstspc,dst)
+@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
+ cmpiclr,<> 1,t0,%r0
+ b,n .Lcase1
+ .Lcase0:
+- cmpb,= %r0,len,.Lcda_finish
++ cmpb,COND(=) %r0,len,.Lcda_finish
+ nop
+
+ 1: ldw,ma 4(srcspc,src), a3
+@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
+ 1: ldw,ma 4(srcspc,src), a3
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+ ldo -1(len),len
+- cmpb,=,n %r0,len,.Ldo0
++ cmpb,COND(=),n %r0,len,.Ldo0
+ .Ldo4:
+ 1: ldw,ma 4(srcspc,src), a0
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
+ 1: stw,ma t0, 4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+ ldo -4(len),len
+- cmpb,<> %r0,len,.Ldo4
++ cmpb,COND(<>) %r0,len,.Ldo4
+ nop
+ .Ldo0:
+ shrpw a2, a3, %sar, t0
+@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
+ /* fault exception fixup handlers: */
+ #ifdef CONFIG_64BIT
+ .Lcopy16_fault:
+-10: b .Lcopy_done
+- std,ma t1,8(dstspc,dst)
++ b .Lcopy_done
++10: std,ma t1,8(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+ #endif
+
+ .Lcopy8_fault:
+-10: b .Lcopy_done
+- stw,ma t1,4(dstspc,dst)
++ b .Lcopy_done
++10: stw,ma t1,4(dstspc,dst)
+ ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+ .exit
+diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
+index 7853b53..3f9d1a8 100644
+--- a/arch/x86/entry/vdso/vdso32-setup.c
++++ b/arch/x86/entry/vdso/vdso32-setup.c
+@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
+ {
+ vdso32_enabled = simple_strtoul(s, NULL, 0);
+
+- if (vdso32_enabled > 1)
++ if (vdso32_enabled > 1) {
+ pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
++ vdso32_enabled = 0;
++ }
+
+ return 1;
+ }
+@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
+ /* Register vsyscall32 into the ABI table */
+ #include <linux/sysctl.h>
+
++static const int zero;
++static const int one = 1;
++
+ static struct ctl_table abi_table2[] = {
+ {
+ .procname = "vsyscall32",
+ .data = &vdso32_enabled,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+- .proc_handler = proc_dointvec
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = (int *)&zero,
++ .extra2 = (int *)&one,
+ },
+ {}
+ };
+diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
+index 81b321a..f924629 100644
+--- a/arch/x86/events/intel/lbr.c
++++ b/arch/x86/events/intel/lbr.c
+@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+ cpuc->lbr_entries[i].to = msr_lastbranch.to;
+ cpuc->lbr_entries[i].mispred = 0;
+ cpuc->lbr_entries[i].predicted = 0;
++ cpuc->lbr_entries[i].in_tx = 0;
++ cpuc->lbr_entries[i].abort = 0;
++ cpuc->lbr_entries[i].cycles = 0;
+ cpuc->lbr_entries[i].reserved = 0;
+ }
+ cpuc->lbr_stack.nr = i;
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index e7f155c..94aad63 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -278,7 +278,7 @@ struct task_struct;
+
+ #define ARCH_DLINFO_IA32 \
+ do { \
+- if (vdso32_enabled) { \
++ if (VDSO_CURRENT_BASE) { \
+ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
+ } \
+diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
+index 2c1ebeb..529bb4a 100644
+--- a/arch/x86/include/asm/pmem.h
++++ b/arch/x86/include/asm/pmem.h
+@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
+ * @size: number of bytes to write back
+ *
+ * Write back a cache range using the CLWB (cache line write back)
+- * instruction.
++ * instruction. Note that @size is internally rounded up to be cache
++ * line size aligned.
+ */
+ static inline void arch_wb_cache_pmem(void *addr, size_t size)
+ {
+@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
+ clwb(p);
+ }
+
+-/*
+- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
+- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
+- */
+-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
+-{
+- return iter_is_iovec(i) == false;
+-}
+-
+ /**
+ * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
+ * @addr: PMEM destination address
+@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
+ /* TODO: skip the write-back by always using non-temporal stores */
+ len = copy_from_iter_nocache(addr, bytes, i);
+
+- if (__iter_needs_pmem_wb(i))
++ /*
++ * In the iovec case on x86_64 copy_from_iter_nocache() uses
++ * non-temporal stores for the bulk of the transfer, but we need
++ * to manually flush if the transfer is unaligned. A cached
++ * memory copy is used when destination or size is not naturally
++ * aligned. That is:
++ * - Require 8-byte alignment when size is 8 bytes or larger.
++ * - Require 4-byte alignment when size is 4 bytes.
++ *
++ * In the non-iovec case the entire destination needs to be
++ * flushed.
++ */
++ if (iter_is_iovec(i)) {
++ unsigned long flushed, dest = (unsigned long) addr;
++
++ if (bytes < 8) {
++ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
++ arch_wb_cache_pmem(addr, 1);
++ } else {
++ if (!IS_ALIGNED(dest, 8)) {
++ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
++ arch_wb_cache_pmem(addr, 1);
++ }
++
++ flushed = dest - (unsigned long) addr;
++ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
++ arch_wb_cache_pmem(addr + bytes - 1, 1);
++ }
++ } else
+ arch_wb_cache_pmem(addr, bytes);
+
+ return len;
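Note on the hunk above: the new iovec branch flushes only the cache lines the non-temporal copy path may have skipped, namely a head that is not naturally aligned and a tail whose remaining length past the first cache-line boundary is not a multiple of 8. A stand-alone sketch of those predicates, with local stand-ins for the kernel's IS_ALIGNED()/ALIGN() macros (assumed equivalent for power-of-two alignments) and made-up values:

    #include <stdio.h>

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
    #define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long dest = 0x1003, bytes = 128, clflush = 64;
            unsigned long flushed;

            if (!IS_ALIGNED(dest, 8))        /* unaligned head */
                    printf("flush line containing 0x%lx\n", dest);

            flushed = ALIGN(dest, clflush) - dest;
            if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
                    printf("flush line containing 0x%lx\n",
                           dest + bytes - 1);
            return 0;
    }

With these values both branches fire: 0x1003 is not 8-byte aligned, and 128 - 61 = 67 bytes remain past the cache-line boundary, which is not a multiple of 8.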
+diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
+index ec1f756..71beb28 100644
+--- a/arch/x86/kernel/signal_compat.c
++++ b/arch/x86/kernel/signal_compat.c
+@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
+
+ if (from->si_signo == SIGSEGV) {
+ if (from->si_code == SEGV_BNDERR) {
+- compat_uptr_t lower = (unsigned long)&to->si_lower;
+- compat_uptr_t upper = (unsigned long)&to->si_upper;
++ compat_uptr_t lower = (unsigned long)from->si_lower;
++ compat_uptr_t upper = (unsigned long)from->si_upper;
+ put_user_ex(lower, &to->si_lower);
+ put_user_ex(upper, &to->si_upper);
+ }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 69b8f8a..43b55ef 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6925,14 +6925,20 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
+ }
+
+ page = nested_get_page(vcpu, vmptr);
+- if (page == NULL ||
+- *(u32 *)kmap(page) != VMCS12_REVISION) {
++ if (page == NULL) {
+ nested_vmx_failInvalid(vcpu);
++ skip_emulated_instruction(vcpu);
++ return 1;
++ }
++ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+ kunmap(page);
++ nested_release_page_clean(page);
++ nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ kunmap(page);
++ nested_release_page_clean(page);
+ vmx->nested.vmxon_ptr = vmptr;
+ break;
+ case EXIT_REASON_VMCLEAR:
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 22af912..889e761 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+- *
+- * On x86, access has to be given to the first megabyte of ram because that area
+- * contains BIOS code and data regions used by X and dosemu and similar apps.
+- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
+- * mmio resources as well as potential bios/acpi data regions.
++ * On x86, access has to be given to the first megabyte of RAM because that
++ * area traditionally contains BIOS code and data regions used by X, dosemu,
++ * and similar apps. Since they map the entire memory range, the whole range
++ * must be allowed (for mapping), but any areas that would otherwise be
++ * disallowed are flagged as being "zero filled" instead of rejected.
++ * Access has to be given to non-kernel-ram areas as well, these contain the
++ * PCI mmio resources as well as potential bios/acpi data regions.
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (pagenr < 256)
+- return 1;
+- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
++ if (page_is_ram(pagenr)) {
++ /*
++ * For disallowed memory regions in the low 1MB range,
++ * request that the page be shown as all zeros.
++ */
++ if (pagenr < 256)
++ return 2;
++
++ return 0;
++ }
++
++ /*
++ * This must follow RAM test, since System RAM is considered a
++ * restricted resource under CONFIG_STRICT_IOMEM.
++ */
++ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
++ /* Low 1MB bypasses iomem restrictions. */
++ if (pagenr < 256)
++ return 1;
++
+ return 0;
+- if (!page_is_ram(pagenr))
+- return 1;
+- return 0;
++ }
++
++ return 1;
+ }
+
+ void free_init_pages(char *what, unsigned long begin, unsigned long end)
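Note on the hunk above: devmem_is_allowed() now returns a tri-state rather than a boolean: 0 rejects the access, 1 allows it, and 2 allows the mapping but asks that reads be satisfied with zeros. The consumers in drivers/char/mem.c, patched further down in this same file, switch on that value; condensed from the read_mem() hunk below:

    allowed = page_is_allowed(p >> PAGE_SHIFT);
    if (!allowed)
            return -EPERM;                      /* 0: refuse outright */
    if (allowed == 2)
            remaining = clear_user(buf, sz);    /* 2: show zeros      */
    else
            remaining = copy_to_user(buf, ptr, sz);  /* 1: real data  */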
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index 30031d5..cdfe8c6 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
+ return;
+ }
+
++ /* No need to reserve regions that will never be freed. */
++ if (md.attribute & EFI_MEMORY_RUNTIME)
++ return;
++
+ size += addr % EFI_PAGE_SIZE;
+ size = round_up(size, EFI_PAGE_SIZE);
+ addr = round_down(addr, EFI_PAGE_SIZE);
+diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
+index 44c88ad..bcea81f 100644
+--- a/arch/x86/xen/apic.c
++++ b/arch/x86/xen/apic.c
+@@ -145,7 +145,7 @@ static void xen_silent_inquire(int apicid)
+ static int xen_cpu_present_to_apicid(int cpu)
+ {
+ if (cpu_present(cpu))
+- return xen_get_apic_id(xen_apic_read(APIC_ID));
++ return cpu_data(cpu).apicid;
+ else
+ return BAD_APICID;
+ }
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 2ce8bcb..cce0268 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -31,6 +31,7 @@ struct ahash_request_priv {
+ crypto_completion_t complete;
+ void *data;
+ u8 *result;
++ u32 flags;
+ void *ubuf[] CRYPTO_MINALIGN_ATTR;
+ };
+
+@@ -252,6 +253,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+ priv->result = req->result;
+ priv->complete = req->base.complete;
+ priv->data = req->base.data;
++ priv->flags = req->base.flags;
++
+ /*
+ * WARNING: We do not backup req->priv here! The req->priv
+ * is for internal use of the Crypto API and the
+@@ -266,38 +269,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+ return 0;
+ }
+
+-static void ahash_restore_req(struct ahash_request *req)
++static void ahash_restore_req(struct ahash_request *req, int err)
+ {
+ struct ahash_request_priv *priv = req->priv;
+
++ if (!err)
++ memcpy(priv->result, req->result,
++ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
++
+ /* Restore the original crypto request. */
+ req->result = priv->result;
+- req->base.complete = priv->complete;
+- req->base.data = priv->data;
++
++ ahash_request_set_callback(req, priv->flags,
++ priv->complete, priv->data);
+ req->priv = NULL;
+
+ /* Free the req->priv.priv from the ADJUSTED request. */
+ kzfree(priv);
+ }
+
+-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
++static void ahash_notify_einprogress(struct ahash_request *req)
+ {
+ struct ahash_request_priv *priv = req->priv;
++ struct crypto_async_request oreq;
+
+- if (err == -EINPROGRESS)
+- return;
+-
+- if (!err)
+- memcpy(priv->result, req->result,
+- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
++ oreq.data = priv->data;
+
+- ahash_restore_req(req);
++ priv->complete(&oreq, -EINPROGRESS);
+ }
+
+ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+ {
+ struct ahash_request *areq = req->data;
+
++ if (err == -EINPROGRESS) {
++ ahash_notify_einprogress(areq);
++ return;
++ }
++
+ /*
+ * Restore the original request, see ahash_op_unaligned() for what
+ * goes where.
+@@ -308,7 +317,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
+ */
+
+ /* First copy req->result into req->priv.result */
+- ahash_op_unaligned_finish(areq, err);
++ ahash_restore_req(areq, err);
+
+ /* Complete the ORIGINAL request. */
+ areq->base.complete(&areq->base, err);
+@@ -324,7 +333,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
+ return err;
+
+ err = op(req);
+- ahash_op_unaligned_finish(req, err);
++ if (err == -EINPROGRESS ||
++ (err == -EBUSY && (ahash_request_flags(req) &
++ CRYPTO_TFM_REQ_MAY_BACKLOG)))
++ return err;
++
++ ahash_restore_req(req, err);
+
+ return err;
+ }
+@@ -359,25 +373,14 @@ int crypto_ahash_digest(struct ahash_request *req)
+ }
+ EXPORT_SYMBOL_GPL(crypto_ahash_digest);
+
+-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
++static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+ {
+- struct ahash_request_priv *priv = req->priv;
++ struct ahash_request *areq = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+- if (!err)
+- memcpy(priv->result, req->result,
+- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+-
+- ahash_restore_req(req);
+-}
+-
+-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
+-{
+- struct ahash_request *areq = req->data;
+-
+- ahash_def_finup_finish2(areq, err);
++ ahash_restore_req(areq, err);
+
+ areq->base.complete(&areq->base, err);
+ }
+@@ -388,11 +391,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
+ goto out;
+
+ req->base.complete = ahash_def_finup_done2;
+- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++
+ err = crypto_ahash_reqtfm(req)->final(req);
++ if (err == -EINPROGRESS ||
++ (err == -EBUSY && (ahash_request_flags(req) &
++ CRYPTO_TFM_REQ_MAY_BACKLOG)))
++ return err;
+
+ out:
+- ahash_def_finup_finish2(req, err);
++ ahash_restore_req(req, err);
+ return err;
+ }
+
+@@ -400,7 +407,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
+ {
+ struct ahash_request *areq = req->data;
+
++ if (err == -EINPROGRESS) {
++ ahash_notify_einprogress(areq);
++ return;
++ }
++
++ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
++
+ err = ahash_def_finup_finish1(areq, err);
++ if (areq->priv)
++ return;
+
+ areq->base.complete(&areq->base, err);
+ }
+@@ -415,6 +431,11 @@ static int ahash_def_finup(struct ahash_request *req)
+ return err;
+
+ err = tfm->update(req);
++ if (err == -EINPROGRESS ||
++ (err == -EBUSY && (ahash_request_flags(req) &
++ CRYPTO_TFM_REQ_MAY_BACKLOG)))
++ return err;
++
+ return ahash_def_finup_finish1(req, err);
+ }
+
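Note on the crypto/ahash.c hunks above: every touched call site now follows one convention: a return of -EINPROGRESS, or of -EBUSY while CRYPTO_TFM_REQ_MAY_BACKLOG is set, means the operation will complete later through the request's callback, so the saved request state must stay allocated until then. The pattern, condensed from the hunks themselves:

    err = tfm->update(req);
    if (err == -EINPROGRESS ||
        (err == -EBUSY && (ahash_request_flags(req) &
                           CRYPTO_TFM_REQ_MAY_BACKLOG)))
            return err;              /* callback will finish cleanup */

    ahash_restore_req(req, err);     /* synchronous: clean up now    */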
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index e8817e2..fde8d88 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -39,6 +39,7 @@ struct aead_async_req {
+ struct aead_async_rsgl first_rsgl;
+ struct list_head list;
+ struct kiocb *iocb;
++ struct sock *sk;
+ unsigned int tsgls;
+ char iv[];
+ };
+@@ -379,12 +380,10 @@ static ssize_t aead_sendpage(struct socket *sock, struct page *page,
+
+ static void aead_async_cb(struct crypto_async_request *_req, int err)
+ {
+- struct sock *sk = _req->data;
+- struct alg_sock *ask = alg_sk(sk);
+- struct aead_ctx *ctx = ask->private;
+- struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
+- struct aead_request *req = aead_request_cast(_req);
++ struct aead_request *req = _req->data;
++ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
++ struct sock *sk = areq->sk;
+ struct scatterlist *sg = areq->tsgl;
+ struct aead_async_rsgl *rsgl;
+ struct kiocb *iocb = areq->iocb;
+@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
+ memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
+ INIT_LIST_HEAD(&areq->list);
+ areq->iocb = msg->msg_iocb;
++ areq->sk = sk;
+ memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ctx->aead_assoclen);
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+- aead_async_cb, sk);
++ aead_async_cb, req);
+ used -= ctx->aead_assoclen;
+
+ /* take over all tx sgls from ctx */
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 48e19d0..22ca892 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -729,12 +729,12 @@ static void start_transaction(struct acpi_ec *ec)
+
+ static int ec_guard(struct acpi_ec *ec)
+ {
+- unsigned long guard = usecs_to_jiffies(ec_polling_guard);
++ unsigned long guard = usecs_to_jiffies(ec->polling_guard);
+ unsigned long timeout = ec->timestamp + guard;
+
+ /* Ensure guarding period before polling EC status */
+ do {
+- if (ec_busy_polling) {
++ if (ec->busy_polling) {
+ /* Perform busy polling */
+ if (ec_transaction_completed(ec))
+ return 0;
+@@ -998,6 +998,28 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
+ spin_unlock_irqrestore(&ec->lock, flags);
+ }
+
++static void acpi_ec_enter_noirq(struct acpi_ec *ec)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ec->lock, flags);
++ ec->busy_polling = true;
++ ec->polling_guard = 0;
++ ec_log_drv("interrupt blocked");
++ spin_unlock_irqrestore(&ec->lock, flags);
++}
++
++static void acpi_ec_leave_noirq(struct acpi_ec *ec)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ec->lock, flags);
++ ec->busy_polling = ec_busy_polling;
++ ec->polling_guard = ec_polling_guard;
++ ec_log_drv("interrupt unblocked");
++ spin_unlock_irqrestore(&ec->lock, flags);
++}
++
+ void acpi_ec_block_transactions(void)
+ {
+ struct acpi_ec *ec = first_ec;
+@@ -1278,7 +1300,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+- if (ec_busy_polling || bits > 8)
++ if (ec->busy_polling || bits > 8)
+ acpi_ec_burst_enable(ec);
+
+ for (i = 0; i < bytes; ++i, ++address, ++value)
+@@ -1286,7 +1308,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
+ acpi_ec_read(ec, address, value) :
+ acpi_ec_write(ec, address, *value);
+
+- if (ec_busy_polling || bits > 8)
++ if (ec->busy_polling || bits > 8)
+ acpi_ec_burst_disable(ec);
+
+ switch (result) {
+@@ -1329,6 +1351,8 @@ static struct acpi_ec *acpi_ec_alloc(void)
+ spin_lock_init(&ec->lock);
+ INIT_WORK(&ec->work, acpi_ec_event_handler);
+ ec->timestamp = jiffies;
++ ec->busy_polling = true;
++ ec->polling_guard = 0;
+ return ec;
+ }
+
+@@ -1390,6 +1414,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+ acpi_ec_start(ec, false);
+
+ if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
++ acpi_ec_enter_noirq(ec);
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+@@ -1429,6 +1454,7 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
+ /* This is not fatal as we can poll EC events */
+ if (ACPI_SUCCESS(status)) {
+ set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
++ acpi_ec_leave_noirq(ec);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
+@@ -1839,34 +1865,6 @@ int __init acpi_ec_ecdt_probe(void)
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+-static void acpi_ec_enter_noirq(struct acpi_ec *ec)
+-{
+- unsigned long flags;
+-
+- if (ec == first_ec) {
+- spin_lock_irqsave(&ec->lock, flags);
+- ec->saved_busy_polling = ec_busy_polling;
+- ec->saved_polling_guard = ec_polling_guard;
+- ec_busy_polling = true;
+- ec_polling_guard = 0;
+- ec_log_drv("interrupt blocked");
+- spin_unlock_irqrestore(&ec->lock, flags);
+- }
+-}
+-
+-static void acpi_ec_leave_noirq(struct acpi_ec *ec)
+-{
+- unsigned long flags;
+-
+- if (ec == first_ec) {
+- spin_lock_irqsave(&ec->lock, flags);
+- ec_busy_polling = ec->saved_busy_polling;
+- ec_polling_guard = ec->saved_polling_guard;
+- ec_log_drv("interrupt unblocked");
+- spin_unlock_irqrestore(&ec->lock, flags);
+- }
+-}
+-
+ static int acpi_ec_suspend_noirq(struct device *dev)
+ {
+ struct acpi_ec *ec =
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index 0c45226..219b90b 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -172,8 +172,8 @@ struct acpi_ec {
+ struct work_struct work;
+ unsigned long timestamp;
+ unsigned long nr_pending_queries;
+- bool saved_busy_polling;
+- unsigned int saved_polling_guard;
++ bool busy_polling;
++ unsigned int polling_guard;
+ };
+
+ extern struct acpi_ec *first_ec;
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index d1664df..9ef3941 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
+ const struct nfit_set_info_map *map0 = m0;
+ const struct nfit_set_info_map *map1 = m1;
+
+- return map0->region_offset - map1->region_offset;
++ if (map0->region_offset < map1->region_offset)
++ return -1;
++ else if (map0->region_offset > map1->region_offset)
++ return 1;
++ return 0;
+ }
+
+ /* Retrieve the nth entry referencing this spa */
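Note on the cmp_map() hunk above: it fixes a classic sort-comparator bug. region_offset is a 64-bit field, so returning map0->region_offset - map1->region_offset truncates the difference to int and can report two distinct offsets as equal, or order them backwards. A stand-alone demonstration (illustrative userspace code, not from the kernel tree):

    #include <stdint.h>
    #include <stdio.h>

    static int cmp_bad(uint64_t a, uint64_t b)
    {
            return a - b;            /* difference truncated to int */
    }

    static int cmp_good(uint64_t a, uint64_t b)
    {
            if (a < b)
                    return -1;
            if (a > b)
                    return 1;
            return 0;
    }

    int main(void)
    {
            uint64_t a = 0, b = 1ULL << 32;    /* differ by 2^32 */

            /* bad prints 0 (claims equal); good prints -1 */
            printf("bad=%d good=%d\n", cmp_bad(a, b), cmp_good(a, b));
            return 0;
    }

The same pitfall applies to any qsort()-style comparator over values wider than int.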
+diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
+index 5a2fdf1..dd3786a 100644
+--- a/drivers/acpi/scan.c
++++ b/drivers/acpi/scan.c
+@@ -1827,15 +1827,20 @@ static void acpi_bus_attach(struct acpi_device *device)
+ return;
+
+ device->flags.match_driver = true;
+- if (!ret) {
+- ret = device_attach(&device->dev);
+- if (ret < 0)
+- return;
+-
+- if (!ret && device->pnp.type.platform_id)
+- acpi_default_enumeration(device);
++ if (ret > 0) {
++ acpi_device_set_enumerated(device);
++ goto ok;
+ }
+
++ ret = device_attach(&device->dev);
++ if (ret < 0)
++ return;
++
++ if (ret > 0 || !device->pnp.type.platform_id)
++ acpi_device_set_enumerated(device);
++ else
++ acpi_default_enumeration(device);
++
+ ok:
+ list_for_each_entry(child, &device->children, node)
+ acpi_bus_attach(child);
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 7a10487..c9441f9 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -54,7 +54,7 @@ struct nbd_device {
+
+ struct mutex tx_lock;
+ struct gendisk *disk;
+- int blksize;
++ loff_t blksize;
+ loff_t bytesize;
+
+ /* protects initialization and shutdown of the socket */
+@@ -126,7 +126,7 @@ static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
+ }
+
+ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+- int blocksize, int nr_blocks)
++ loff_t blocksize, loff_t nr_blocks)
+ {
+ int ret;
+
+@@ -135,7 +135,7 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
+ return ret;
+
+ nbd->blksize = blocksize;
+- nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;
++ nbd->bytesize = blocksize * nr_blocks;
+
+ nbd_size_update(nbd, bdev);
+
+@@ -648,7 +648,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
+
+ case NBD_SET_SIZE:
+ return nbd_size_set(nbd, bdev, nbd->blksize,
+- arg / nbd->blksize);
++ div_s64(arg, nbd->blksize));
+
+ case NBD_SET_SIZE_BLOCKS:
+ return nbd_size_set(nbd, bdev, nbd->blksize, arg);
+@@ -817,7 +817,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
+ debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+ debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+ debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
+- debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
++ debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
+
+ return 0;
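Note on the nbd.c hunks above: widening blksize to loff_t and routing the ioctl division through div_s64() both guard against 32-bit truncation; in addition, on 32-bit kernels a plain 64-bit division would need a libgcc helper (__divdi3) that the kernel does not link in, which is what div_s64() from <linux/math64.h> avoids. A stand-alone illustration of the truncation half, using unsigned types so the wraparound is well-defined:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t blocksize = 4096, nr_blocks = 1u << 20;  /* 4 TiB */

            uint64_t wrong = blocksize * nr_blocks;     /* wraps to 0 */
            uint64_t right = (uint64_t)blocksize * nr_blocks;

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong,
                   (unsigned long long)right);
            return 0;
    }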
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index d2ef51c..c9914d6 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -582,13 +582,13 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+- clear_page(mem);
++ memset(mem, 0, PAGE_SIZE);
+ return 0;
+ }
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE) {
+- copy_page(mem, cmem);
++ memcpy(mem, cmem, PAGE_SIZE);
+ } else {
+ struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
+
+@@ -780,7 +780,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
+
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+- copy_page(cmem, src);
++ memcpy(cmem, src, PAGE_SIZE);
+ kunmap_atomic(src);
+ } else {
+ memcpy(cmem, src, clen);
+diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
+index dcc0973..8453a49 100644
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -571,9 +571,12 @@ config TELCLOCK
+ controlling the behavior of this hardware.
+
+ config DEVPORT
+- bool
++ bool "/dev/port character device"
+ depends on ISA || PCI
+ default y
++ help
++ Say Y here if you want to support the /dev/port device. The /dev/port
++ device is similar to /dev/mem, but for I/O ports.
+
+ source "drivers/s390/char/Kconfig"
+
+diff --git a/drivers/char/mem.c b/drivers/char/mem.c
+index 6d9cc2d..7e4a9d1 100644
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+ #endif
+
+ #ifdef CONFIG_STRICT_DEVMEM
++static inline int page_is_allowed(unsigned long pfn)
++{
++ return devmem_is_allowed(pfn);
++}
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+ u64 from = ((u64)pfn) << PAGE_SHIFT;
+@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ return 1;
+ }
+ #else
++static inline int page_is_allowed(unsigned long pfn)
++{
++ return 1;
++}
+ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+ {
+ return 1;
+@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
+
+ while (count > 0) {
+ unsigned long remaining;
++ int allowed;
+
+ sz = size_inside_page(p, count);
+
+- if (!range_is_allowed(p >> PAGE_SHIFT, count))
++ allowed = page_is_allowed(p >> PAGE_SHIFT);
++ if (!allowed)
+ return -EPERM;
++ if (allowed == 2) {
++ /* Show zeros for restricted memory. */
++ remaining = clear_user(buf, sz);
++ } else {
++ /*
++ * On ia64 if a page has been mapped somewhere as
++ * uncached, then it must also be accessed uncached
++ * by the kernel or data corruption may occur.
++ */
++ ptr = xlate_dev_mem_ptr(p);
++ if (!ptr)
++ return -EFAULT;
+
+- /*
+- * On ia64 if a page has been mapped somewhere as uncached, then
+- * it must also be accessed uncached by the kernel or data
+- * corruption may occur.
+- */
+- ptr = xlate_dev_mem_ptr(p);
+- if (!ptr)
+- return -EFAULT;
++ remaining = copy_to_user(buf, ptr, sz);
++
++ unxlate_dev_mem_ptr(p, ptr);
++ }
+
+- remaining = copy_to_user(buf, ptr, sz);
+- unxlate_dev_mem_ptr(p, ptr);
+ if (remaining)
+ return -EFAULT;
+
+@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
+ #endif
+
+ while (count > 0) {
++ int allowed;
++
+ sz = size_inside_page(p, count);
+
+- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
++ allowed = page_is_allowed(p >> PAGE_SHIFT);
++ if (!allowed)
+ return -EPERM;
+
+- /*
+- * On ia64 if a page has been mapped somewhere as uncached, then
+- * it must also be accessed uncached by the kernel or data
+- * corruption may occur.
+- */
+- ptr = xlate_dev_mem_ptr(p);
+- if (!ptr) {
+- if (written)
+- break;
+- return -EFAULT;
+- }
++ /* Skip actual writing when a page is marked as restricted. */
++ if (allowed == 1) {
++ /*
++ * On ia64 if a page has been mapped somewhere as
++ * uncached, then it must also be accessed uncached
++ * by the kernel or data corruption may occur.
++ */
++ ptr = xlate_dev_mem_ptr(p);
++ if (!ptr) {
++ if (written)
++ break;
++ return -EFAULT;
++ }
+
+- copied = copy_from_user(ptr, buf, sz);
+- unxlate_dev_mem_ptr(p, ptr);
+- if (copied) {
+- written += sz - copied;
+- if (written)
+- break;
+- return -EFAULT;
++ copied = copy_from_user(ptr, buf, sz);
++ unxlate_dev_mem_ptr(p, ptr);
++ if (copied) {
++ written += sz - copied;
++ if (written)
++ break;
++ return -EFAULT;
++ }
+ }
+
+ buf += sz;
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 5649234..471a301 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1136,6 +1136,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
+ {
+ struct port *port;
+ struct scatterlist sg[1];
++ void *data;
++ int ret;
+
+ if (unlikely(early_put_chars))
+ return early_put_chars(vtermno, buf, count);
+@@ -1144,8 +1146,14 @@ static int put_chars(u32 vtermno, const char *buf, int count)
+ if (!port)
+ return -EPIPE;
+
+- sg_init_one(sg, buf, count);
+- return __send_to_port(port, sg, 1, count, (void *)buf, false);
++ data = kmemdup(buf, count, GFP_ATOMIC);
++ if (!data)
++ return -ENOMEM;
++
++ sg_init_one(sg, data, count);
++ ret = __send_to_port(port, sg, 1, count, data, false);
++ kfree(data);
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index cac4a92..6153b66 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2404,6 +2404,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
+ *********************************************************************/
+ static enum cpuhp_state hp_online;
+
++static int cpuhp_cpufreq_online(unsigned int cpu)
++{
++ cpufreq_online(cpu);
++
++ return 0;
++}
++
++static int cpuhp_cpufreq_offline(unsigned int cpu)
++{
++ cpufreq_offline(cpu);
++
++ return 0;
++}
++
+ /**
+ * cpufreq_register_driver - register a CPU Frequency driver
+ * @driver_data: A struct cpufreq_driver containing the values#
+@@ -2466,8 +2480,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ }
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
+- cpufreq_online,
+- cpufreq_offline);
++ cpuhp_cpufreq_online,
++ cpuhp_cpufreq_offline);
+ if (ret < 0)
+ goto err_if_unreg;
+ hp_online = ret;
+diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
+index 932742e..24c461d 100644
+--- a/drivers/firmware/efi/libstub/gop.c
++++ b/drivers/firmware/efi/libstub/gop.c
+@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+
+ status = __gop_query32(sys_table_arg, gop32, &info, &size,
+ &current_fb_base);
+- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
++ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
++ info->pixel_format != PIXEL_BLT_ONLY) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+
+ status = __gop_query64(sys_table_arg, gop64, &info, &size,
+ &current_fb_base);
+- if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
++ if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
++ info->pixel_format != PIXEL_BLT_ONLY) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+index b87d278..a336754 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+@@ -1305,7 +1305,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ if (!fence) {
+ event_free(gpu, event);
+ ret = -ENOMEM;
+- goto out_pm_put;
++ goto out_unlock;
+ }
+
+ gpu->event[event].fence = fence;
+@@ -1345,6 +1345,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
+ hangcheck_timer_reset(gpu);
+ ret = 0;
+
++out_unlock:
+ mutex_unlock(&gpu->lock);
+
+ out_pm_put:
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+index e0d7f84..d741ff8 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+@@ -714,7 +714,7 @@ nv4a_chipset = {
+ .i2c = nv04_i2c_new,
+ .imem = nv40_instmem_new,
+ .mc = nv44_mc_new,
+- .mmu = nv44_mmu_new,
++ .mmu = nv04_mmu_new,
+ .pci = nv40_pci_new,
+ .therm = nv40_therm_new,
+ .timer = nv41_timer_new,
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+index fbb8c7d..0d65e7f 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
+ case 0x94:
+ case 0x96:
+ case 0x98:
+- case 0xaa:
+- case 0xac:
+ return true;
+ default:
+ break;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+index 003ac91..8a88952 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
+ }
+
+ if (type == 0x00000010) {
+- if (!nv31_mpeg_mthd(mpeg, mthd, data))
++ if (nv31_mpeg_mthd(mpeg, mthd, data))
+ show &= ~0x01000000;
+ }
+ }
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+index e536f37..c3cf02e 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
+ }
+
+ if (type == 0x00000010) {
+- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
++ if (nv44_mpeg_mthd(subdev->device, mthd, data))
+ show &= ~0x01000000;
+ }
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index bbe1524..f397a5b 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -201,6 +201,7 @@ static const struct xpad_device {
+ { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
++ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
+ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
+@@ -329,6 +330,7 @@ static struct usb_device_id xpad_table[] = {
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
++ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
+ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ { }
+diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
+index 15af9a9..2d203b4 100644
+--- a/drivers/irqchip/irq-imx-gpcv2.c
++++ b/drivers/irqchip/irq-imx-gpcv2.c
+@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+ return -ENOMEM;
+ }
+
++ raw_spin_lock_init(&cd->rlock);
++
+ cd->gpc_base = of_iomap(node, 0);
+ if (!cd->gpc_base) {
+ pr_err("fsl-gpcv2: unable to map gpc registers\n");
+diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+index a8e6624..a9bb2dd 100644
+--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
++++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+@@ -1013,8 +1013,8 @@ EXPORT_SYMBOL(dvb_usbv2_probe);
+ void dvb_usbv2_disconnect(struct usb_interface *intf)
+ {
+ struct dvb_usb_device *d = usb_get_intfdata(intf);
+- const char *name = d->name;
+- struct device dev = d->udev->dev;
++ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
++ const char *drvname = d->name;
+
+ dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+@@ -1024,8 +1024,9 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
+
+ dvb_usbv2_exit(d);
+
+- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
+- KBUILD_MODNAME, name);
++ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
++ KBUILD_MODNAME, drvname, devname);
++ kfree(devname);
+ }
+ EXPORT_SYMBOL(dvb_usbv2_disconnect);
+
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index 2434030..9fd43a3 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -59,23 +59,24 @@ static int cxusb_ctrl_msg(struct dvb_usb_device *d,
+ u8 cmd, u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+ {
+ struct cxusb_state *st = d->priv;
+- int ret, wo;
++ int ret;
+
+ if (1 + wlen > MAX_XFER_SIZE) {
+ warn("i2c wr: len=%d is too big!\n", wlen);
+ return -EOPNOTSUPP;
+ }
+
+- wo = (rbuf == NULL || rlen == 0); /* write-only */
++ if (rlen > MAX_XFER_SIZE) {
++ warn("i2c rd: len=%d is too big!\n", rlen);
++ return -EOPNOTSUPP;
++ }
+
+ mutex_lock(&d->data_mutex);
+ st->data[0] = cmd;
+ memcpy(&st->data[1], wbuf, wlen);
+- if (wo)
+- ret = dvb_usb_generic_write(d, st->data, 1 + wlen);
+- else
+- ret = dvb_usb_generic_rw(d, st->data, 1 + wlen,
+- rbuf, rlen, 0);
++ ret = dvb_usb_generic_rw(d, st->data, 1 + wlen, st->data, rlen, 0);
++ if (!ret && rbuf && rlen)
++ memcpy(rbuf, st->data, rlen);
+
+ mutex_unlock(&d->data_mutex);
+ return ret;
+diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+index dd048a7..b8d2ac5 100644
+--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
++++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+@@ -35,42 +35,51 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le
+
+ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
+ {
+- struct hexline hx;
+- u8 reset;
+- int ret,pos=0;
++ struct hexline *hx;
++ u8 *buf;
++ int ret, pos = 0;
++ u16 cpu_cs_register = cypress[type].cpu_cs_register;
++
++ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++ hx = (struct hexline *)buf;
+
+ /* stop the CPU */
+- reset = 1;
+- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
++ buf[0] = 1;
++ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
+ err("could not stop the USB controller CPU.");
+
+- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
+- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
+- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
++ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
++ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
++ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
+
+- if (ret != hx.len) {
++ if (ret != hx->len) {
+ err("error while transferring firmware "
+ "(transferred size: %d, block size: %d)",
+- ret,hx.len);
++ ret, hx->len);
+ ret = -EINVAL;
+ break;
+ }
+ }
+ if (ret < 0) {
+ err("firmware download failed at %d with %d",pos,ret);
++ kfree(buf);
+ return ret;
+ }
+
+ if (ret == 0) {
+ /* restart the CPU */
+- reset = 0;
+- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
++ buf[0] = 0;
++ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
+ err("could not restart the USB controller CPU.");
+ ret = -EINVAL;
+ }
+ } else
+ ret = -EIO;
+
++ kfree(buf);
++
+ return ret;
+ }
+ EXPORT_SYMBOL(usb_cypress_load_firmware);
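Note on the firmware-loader rewrite above: it is not cosmetic. The hexline and the CPU-reset byte move from the stack into a kmalloc()ed buffer because USB transfer buffers must be DMA-safe, and stack memory is not. A minimal sketch of the pattern; send_reset() is a hypothetical helper, while usb_cypress_writemem() is the function patched above (it returns the transferred length, hence the check against 1):

    /* Sketch only: heap-allocate a 1-byte DMA-safe buffer for the
     * CPU-reset write instead of passing a stack variable. */
    static int send_reset(struct usb_device *udev, u16 cs_reg, u8 value)
    {
            u8 *buf = kmalloc(1, GFP_KERNEL);
            int ret;

            if (!buf)
                    return -ENOMEM;
            buf[0] = value;
            ret = usb_cypress_writemem(udev, cs_reg, buf, 1);
            kfree(buf);
            return ret == 1 ? 0 : -EIO;
    }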
+diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
+index 368bb07..481895b 100644
+--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
++++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
+@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
+ int work_done = 0;
+
+ u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
+- u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
++ u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
+ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
+
+ /* Handle bus state changes */
+diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
+index e2512d5..eedf86b 100644
+--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
++++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
+@@ -528,6 +528,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
+ if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
+ return 0;
+
++ if (!spec_priv->rfs_chan_spec_scan)
++ return 1;
++
+ /* Output buffers are full, no need to process anything
+ * since there is no space to put the result anyway
+ */
+@@ -1072,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
+
+ void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
+ {
+- if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
++ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+ relay_close(spec_priv->rfs_chan_spec_scan);
+ spec_priv->rfs_chan_spec_scan = NULL;
+ }
+@@ -1086,6 +1089,9 @@ void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+ debugfs_phy,
+ 1024, 256, &rfs_spec_scan_cb,
+ NULL);
++ if (!spec_priv->rfs_chan_spec_scan)
++ return;
++
+ debugfs_create_file("spectral_scan_ctl",
+ S_IRUSR | S_IWUSR,
+ debugfs_phy, spec_priv,
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 23d4a17..351bac8 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
+ rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
+ if (rc < 0)
+ goto out_unlock;
++ nvdimm_bus_unlock(&nvdimm_bus->dev);
++
+ if (copy_to_user(p, buf, buf_len))
+ rc = -EFAULT;
++
++ vfree(buf);
++ return rc;
++
+ out_unlock:
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+ out:
+diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
+index d614493..dcb32f3 100644
+--- a/drivers/nvdimm/dimm_devs.c
++++ b/drivers/nvdimm/dimm_devs.c
+@@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
+
+ int alias_dpa_busy(struct device *dev, void *data)
+ {
+- resource_size_t map_end, blk_start, new, busy;
++ resource_size_t map_end, blk_start, new;
+ struct blk_alloc_info *info = data;
+ struct nd_mapping *nd_mapping;
+ struct nd_region *nd_region;
+@@ -429,29 +429,19 @@ int alias_dpa_busy(struct device *dev, void *data)
+ retry:
+ /*
+ * Find the free dpa from the end of the last pmem allocation to
+- * the end of the interleave-set mapping that is not already
+- * covered by a blk allocation.
++ * the end of the interleave-set mapping.
+ */
+- busy = 0;
+ for_each_dpa_resource(ndd, res) {
++ if (strncmp(res->name, "pmem", 4) != 0)
++ continue;
+ if ((res->start >= blk_start && res->start < map_end)
+ || (res->end >= blk_start
+ && res->end <= map_end)) {
+- if (strncmp(res->name, "pmem", 4) == 0) {
+- new = max(blk_start, min(map_end + 1,
+- res->end + 1));
+- if (new != blk_start) {
+- blk_start = new;
+- goto retry;
+- }
+- } else
+- busy += min(map_end, res->end)
+- - max(nd_mapping->start, res->start) + 1;
+- } else if (nd_mapping->start > res->start
+- && map_end < res->end) {
+- /* total eclipse of the PMEM region mapping */
+- busy += nd_mapping->size;
+- break;
++ new = max(blk_start, min(map_end + 1, res->end + 1));
++ if (new != blk_start) {
++ blk_start = new;
++ goto retry;
++ }
+ }
+ }
+
+@@ -463,52 +453,11 @@ int alias_dpa_busy(struct device *dev, void *data)
+ return 1;
+ }
+
+- info->available -= blk_start - nd_mapping->start + busy;
++ info->available -= blk_start - nd_mapping->start;
+
+ return 0;
+ }
+
+-static int blk_dpa_busy(struct device *dev, void *data)
+-{
+- struct blk_alloc_info *info = data;
+- struct nd_mapping *nd_mapping;
+- struct nd_region *nd_region;
+- resource_size_t map_end;
+- int i;
+-
+- if (!is_nd_pmem(dev))
+- return 0;
+-
+- nd_region = to_nd_region(dev);
+- for (i = 0; i < nd_region->ndr_mappings; i++) {
+- nd_mapping = &nd_region->mapping[i];
+- if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
+- break;
+- }
+-
+- if (i >= nd_region->ndr_mappings)
+- return 0;
+-
+- map_end = nd_mapping->start + nd_mapping->size - 1;
+- if (info->res->start >= nd_mapping->start
+- && info->res->start < map_end) {
+- if (info->res->end <= map_end) {
+- info->busy = 0;
+- return 1;
+- } else {
+- info->busy -= info->res->end - map_end;
+- return 0;
+- }
+- } else if (info->res->end >= nd_mapping->start
+- && info->res->end <= map_end) {
+- info->busy -= nd_mapping->start - info->res->start;
+- return 0;
+- } else {
+- info->busy -= nd_mapping->size;
+- return 0;
+- }
+-}
+-
+ /**
+ * nd_blk_available_dpa - account the unused dpa of BLK region
+ * @nd_mapping: container of dpa-resource-root + labels
+@@ -538,11 +487,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
+ for_each_dpa_resource(ndd, res) {
+ if (strncmp(res->name, "blk", 3) != 0)
+ continue;
+-
+- info.res = res;
+- info.busy = resource_size(res);
+- device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
+- info.available -= info.busy;
++ info.available -= resource_size(res);
+ }
+
+ return info.available;
+diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
+index a66192f..c29b9b6 100644
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -1846,11 +1846,24 @@ static int __init acer_wmi_enable_lm(void)
+ return status;
+ }
+
++#define ACER_WMID_ACCEL_HID "BST0001"
++
+ static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
+ void *ctx, void **retval)
+ {
++ struct acpi_device *dev;
++
++ if (!strcmp(ctx, "SENR")) {
++ if (acpi_bus_get_device(ah, &dev))
++ return AE_OK;
++ if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
++ return AE_OK;
++ } else
++ return AE_OK;
++
+ *(acpi_handle *)retval = ah;
+- return AE_OK;
++
++ return AE_CTRL_TERMINATE;
+ }
+
+ static int __init acer_wmi_get_handle(const char *name, const char *prop,
+@@ -1877,7 +1890,7 @@ static int __init acer_wmi_accel_setup(void)
+ {
+ int err;
+
+- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
++ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
+ if (err)
+ return err;
+
+@@ -2233,10 +2246,11 @@ static int __init acer_wmi_init(void)
+ err = acer_wmi_input_setup();
+ if (err)
+ return err;
++ err = acer_wmi_accel_setup();
++ if (err)
++ return err;
+ }
+
+- acer_wmi_accel_setup();
+-
+ err = platform_driver_register(&acer_platform_driver);
+ if (err) {
+ pr_err("Unable to register platform driver\n");
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index ef89df1..744d561 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ return 0;
+ }
+
++static int rockchip_pwm_enable(struct pwm_chip *chip,
++ struct pwm_device *pwm,
++ bool enable,
++ enum pwm_polarity polarity)
++{
++ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
++ int ret;
++
++ if (enable) {
++ ret = clk_enable(pc->clk);
++ if (ret)
++ return ret;
++ }
++
++ pc->data->set_enable(chip, pwm, enable, polarity);
++
++ if (!enable)
++ clk_disable(pc->clk);
++
++ return 0;
++}
++
+ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+ {
+@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ return ret;
+
+ if (state->polarity != curstate.polarity && enabled) {
+- pc->data->set_enable(chip, pwm, false, state->polarity);
++ ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
++ if (ret)
++ goto out;
+ enabled = false;
+ }
+
+ ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
+ if (ret) {
+ if (enabled != curstate.enabled)
+- pc->data->set_enable(chip, pwm, !enabled,
+- state->polarity);
+-
++ rockchip_pwm_enable(chip, pwm, !enabled,
++ state->polarity);
+ goto out;
+ }
+
+- if (state->enabled != enabled)
+- pc->data->set_enable(chip, pwm, state->enabled,
+- state->polarity);
++ if (state->enabled != enabled) {
++ ret = rockchip_pwm_enable(chip, pwm, state->enabled,
++ state->polarity);
++ if (ret)
++ goto out;
++ }
+
+ /*
+ * Update the state with the real hardware, which can differ a bit
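
The new rockchip_pwm_enable() wrapper above encodes an ordering rule rather than new behavior: the PWM clock must be running before the enable register is written, and may be gated again only after the hardware is off. The bare shape of that pattern, with illustrative stand-ins for the clk API:

    #include <stdio.h>

    static int clk_users;

    static int fake_clk_enable(void)   { clk_users++; return 0; }
    static void fake_clk_disable(void) { clk_users--; }

    static void write_enable_reg(int on)
    {
        printf("ENABLE <- %d (clk_users=%d)\n", on, clk_users);
    }

    static int pwm_set(int enable)
    {
        if (enable) {
            int ret = fake_clk_enable();  /* clock up before the write */
            if (ret)
                return ret;
        }

        write_enable_reg(enable);

        if (!enable)
            fake_clk_disable();           /* gate only once hardware is off */

        return 0;
    }

    int main(void)
    {
        pwm_set(1);   /* ENABLE <- 1 (clk_users=1) */
        pwm_set(0);   /* ENABLE <- 0 (clk_users=1), clock gated afterwards */
        return 0;
    }
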
+diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
+index 3853ba9..19e03d0 100644
+--- a/drivers/rtc/rtc-tegra.c
++++ b/drivers/rtc/rtc-tegra.c
+@@ -18,6 +18,7 @@
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+ #include <linux/kernel.h>
++#include <linux/clk.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+@@ -59,6 +60,7 @@ struct tegra_rtc_info {
+ struct platform_device *pdev;
+ struct rtc_device *rtc_dev;
+ void __iomem *rtc_base; /* NULL if not initialized. */
++ struct clk *clk;
+ int tegra_rtc_irq; /* alarm and periodic irq */
+ spinlock_t tegra_rtc_lock;
+ };
+@@ -326,6 +328,14 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
+ if (info->tegra_rtc_irq <= 0)
+ return -EBUSY;
+
++ info->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(info->clk))
++ return PTR_ERR(info->clk);
++
++ ret = clk_prepare_enable(info->clk);
++ if (ret < 0)
++ return ret;
++
+ /* set context info. */
+ info->pdev = pdev;
+ spin_lock_init(&info->tegra_rtc_lock);
+@@ -346,7 +356,7 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
+ ret = PTR_ERR(info->rtc_dev);
+ dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
+ ret);
+- return ret;
++ goto disable_clk;
+ }
+
+ ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+@@ -356,12 +366,25 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
+ dev_err(&pdev->dev,
+ "Unable to request interrupt for device (err=%d).\n",
+ ret);
+- return ret;
++ goto disable_clk;
+ }
+
+ dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
+
+ return 0;
++
++disable_clk:
++ clk_disable_unprepare(info->clk);
++ return ret;
++}
++
++static int tegra_rtc_remove(struct platform_device *pdev)
++{
++ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
++
++ clk_disable_unprepare(info->clk);
++
++ return 0;
+ }
+
+ #ifdef CONFIG_PM_SLEEP
+@@ -413,6 +436,7 @@ static void tegra_rtc_shutdown(struct platform_device *pdev)
+
+ MODULE_ALIAS("platform:tegra_rtc");
+ static struct platform_driver tegra_rtc_driver = {
++ .remove = tegra_rtc_remove,
+ .shutdown = tegra_rtc_shutdown,
+ .driver = {
+ .name = "tegra_rtc",
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 4f361d8..734e592 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -968,8 +968,13 @@ static inline
+ uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
+ {
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
++ struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
+
+- return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
++ if (IS_P3P_TYPE(ha))
++ return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
++ else
++ return ((RD_REG_DWORD(&reg->host_status)) ==
++ ISP_REG_DISCONNECT);
+ }
+
+ /**************************************************************************
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 51e5629..931af07 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2057,6 +2057,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
+
+ #define READ_CAPACITY_RETRIES_ON_RESET 10
+
++/*
++ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
++ * and the reported logical block size is bigger than 512 bytes. Note
++ * that last_sector is a u64 and therefore logical_to_sectors() is not
++ * applicable.
++ */
++static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
++{
++ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
++
++ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
++ return false;
++
++ return true;
++}
++
+ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ unsigned char *buffer)
+ {
+@@ -2122,7 +2138,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ return -ENODEV;
+ }
+
+- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
++ if (!sd_addressable_capacity(lba, sector_size)) {
+ sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ "kernel compiled with support for large block "
+ "devices.\n");
+@@ -2208,7 +2224,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
+ return sector_size;
+ }
+
+- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
++ if (!sd_addressable_capacity(lba, sector_size)) {
+ sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
+ "kernel compiled with support for large block "
+ "devices.\n");
+@@ -2877,7 +2893,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
+ rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+ } else
+- rw_max = BLK_DEF_MAX_SECTORS;
++ rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
++ (sector_t)BLK_DEF_MAX_SECTORS);
+
+ /* Combine with controller limits */
+ q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
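
The sd_addressable_capacity() comment above is easier to see with numbers: for 4096-byte logical blocks, ilog2(4096) - 9 = 3, so a last LBA of 0x1fffffff maps to (0x1fffffff + 1) << 3 = 2^32 512-byte sectors, overflowing a 32-bit sector_t even though the LBA itself is far below the old 0xffffffff cutoff. A user-space rendering of the check (ilog2 open-coded; not the kernel helper itself):

    #include <stdint.h>
    #include <stdio.h>

    static int addressable(uint64_t lba, unsigned int sector_size,
                           size_t sizeof_sector_t)
    {
        unsigned int shift = 0;
        uint64_t last_sector;

        while ((512u << shift) < sector_size)  /* ilog2(sector_size) - 9 */
            shift++;

        last_sector = (lba + 1ULL) << shift;

        return !(sizeof_sector_t == 4 && last_sector > UINT32_MAX);
    }

    int main(void)
    {
        /* exactly the last sector a 32-bit sector_t can address: fine */
        printf("%d\n", addressable(0xfffffffeULL, 512, 4));   /* 1 */
        /* small LBA, 4K blocks: 2^32 sectors, missed by the old check */
        printf("%d\n", addressable(0x1fffffffULL, 4096, 4));  /* 0 */
        return 0;
    }
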
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index bed2bbd..e635973 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -833,6 +833,7 @@ static void get_capabilities(struct scsi_cd *cd)
+ unsigned char *buffer;
+ struct scsi_mode_data data;
+ struct scsi_sense_hdr sshdr;
++ unsigned int ms_len = 128;
+ int rc, n;
+
+ static const char *loadmech[] =
+@@ -859,10 +860,11 @@ static void get_capabilities(struct scsi_cd *cd)
+ scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
+
+ /* ask for mode page 0x2a */
+- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
++ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
+ SR_TIMEOUT, 3, &data, NULL);
+
+- if (!scsi_status_is_good(rc)) {
++ if (!scsi_status_is_good(rc) || data.length > ms_len ||
++ data.header_length + data.block_descriptor_length > data.length) {
+ /* failed, drive doesn't have capabilities mode page */
+ cd->cdi.speed = 1;
+ cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
+diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
+index 0efa80b..4a073339a 100644
+--- a/drivers/target/iscsi/iscsi_target_parameters.c
++++ b/drivers/target/iscsi/iscsi_target_parameters.c
+@@ -782,22 +782,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+ if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ /*
+- * The GlobalSAN iSCSI Initiator for MacOSX does
+- * not respond to MaxBurstLength, FirstBurstLength,
+- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
+- * So, we set them to 'reply optional' here, and assume the
+- * the defaults from iscsi_parameters.h if the initiator
+- * is not RFC compliant and the keys are not negotiated.
+- */
+- if (!strcmp(param->name, MAXBURSTLENGTH))
+- SET_PSTATE_REPLY_OPTIONAL(param);
+- if (!strcmp(param->name, FIRSTBURSTLENGTH))
+- SET_PSTATE_REPLY_OPTIONAL(param);
+- if (!strcmp(param->name, DEFAULTTIME2WAIT))
+- SET_PSTATE_REPLY_OPTIONAL(param);
+- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
+- SET_PSTATE_REPLY_OPTIONAL(param);
+- /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index 1f38177..da5a5fc 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -735,21 +735,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ {
+ struct se_cmd *se_cmd = NULL;
+ int rc;
++ bool op_scsi = false;
+ /*
+ * Determine if a struct se_cmd is associated with
+ * this struct iscsi_cmd.
+ */
+ switch (cmd->iscsi_opcode) {
+ case ISCSI_OP_SCSI_CMD:
+- se_cmd = &cmd->se_cmd;
+- __iscsit_free_cmd(cmd, true, shutdown);
++ op_scsi = true;
+ /*
+ * Fallthrough
+ */
+ case ISCSI_OP_SCSI_TMFUNC:
+- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+- __iscsit_free_cmd(cmd, true, shutdown);
++ se_cmd = &cmd->se_cmd;
++ __iscsit_free_cmd(cmd, op_scsi, shutdown);
++ rc = transport_generic_free_cmd(se_cmd, shutdown);
++ if (!rc && shutdown && se_cmd->se_sess) {
++ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ target_put_sess_cmd(se_cmd);
+ }
+ break;
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index 31a096a..6e456de 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
+ pr_err("Source se_lun->lun_se_dev does not exist\n");
+ return -EINVAL;
+ }
++ if (lun->lun_shutdown) {
++ pr_err("Unable to create mappedlun symlink because"
++ " lun->lun_shutdown=true\n");
++ return -EINVAL;
++ }
+ se_tpg = lun->lun_tpg;
+
+ nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 2744251..1949f50 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -640,6 +640,8 @@ void core_tpg_remove_lun(
+ */
+ struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+
++ lun->lun_shutdown = true;
++
+ core_clear_lun_from_tpg(lun, tpg);
+ /*
+ * Wait for any active I/O references to percpu se_lun->lun_ref to
+@@ -661,6 +663,8 @@ void core_tpg_remove_lun(
+ }
+ if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
+ hlist_del_rcu(&lun->link);
++
++ lun->lun_shutdown = false;
+ mutex_unlock(&tpg->tpg_lun_mutex);
+
+ percpu_ref_exit(&lun->lun_ref);
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 70c143a..1a83456 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -306,24 +306,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
+ DATA_BLOCK_BITS);
+ }
+
+-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
+- struct scatterlist *data_sg, unsigned int data_nents)
++static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
++ bool bidi)
+ {
++ struct se_cmd *se_cmd = cmd->se_cmd;
+ int i, block;
+ int block_remaining = 0;
+ void *from, *to;
+ size_t copy_bytes, from_offset;
+- struct scatterlist *sg;
++ struct scatterlist *sg, *data_sg;
++ unsigned int data_nents;
++ DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
++
++ bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
++
++ if (!bidi) {
++ data_sg = se_cmd->t_data_sg;
++ data_nents = se_cmd->t_data_nents;
++ } else {
++ uint32_t count;
++
++ /*
++ * For bidi case, the first count blocks are for Data-Out
++ * buffer blocks, and before gathering the Data-In buffer
++ * the Data-Out buffer blocks should be discarded.
++ */
++ count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
++ while (count--) {
++ block = find_first_bit(bitmap, DATA_BLOCK_BITS);
++ clear_bit(block, bitmap);
++ }
++
++ data_sg = se_cmd->t_bidi_data_sg;
++ data_nents = se_cmd->t_bidi_data_nents;
++ }
+
+ for_each_sg(data_sg, sg, data_nents, i) {
+ int sg_remaining = sg->length;
+ to = kmap_atomic(sg_page(sg)) + sg->offset;
+ while (sg_remaining > 0) {
+ if (block_remaining == 0) {
+- block = find_first_bit(cmd_bitmap,
++ block = find_first_bit(bitmap,
+ DATA_BLOCK_BITS);
+ block_remaining = DATA_BLOCK_SIZE;
+- clear_bit(block, cmd_bitmap);
++ clear_bit(block, bitmap);
+ }
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
+@@ -389,6 +415,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
+ return true;
+ }
+
++static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
++{
++ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
++ size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
++
++ if (se_cmd->se_cmd_flags & SCF_BIDI) {
++ BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
++ data_length += round_up(se_cmd->t_bidi_data_sg->length,
++ DATA_BLOCK_SIZE);
++ }
++
++ return data_length;
++}
++
++static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
++{
++ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
++
++ return data_length / DATA_BLOCK_SIZE;
++}
++
+ static sense_reason_t
+ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ {
+@@ -402,7 +449,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ uint32_t cmd_head;
+ uint64_t cdb_off;
+ bool copy_to_data_area;
+- size_t data_length;
++ size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
+@@ -416,8 +463,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ * expensive to tell how many regions are freed in the bitmap
+ */
+ base_command_size = max(offsetof(struct tcmu_cmd_entry,
+- req.iov[se_cmd->t_bidi_data_nents +
+- se_cmd->t_data_nents]),
++ req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
+ sizeof(struct tcmu_cmd_entry));
+ command_size = base_command_size
+ + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
+@@ -428,11 +474,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+
+ mb = udev->mb_addr;
+ cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+- data_length = se_cmd->data_length;
+- if (se_cmd->se_cmd_flags & SCF_BIDI) {
+- BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+- data_length += se_cmd->t_bidi_data_sg->length;
+- }
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+@@ -502,11 +543,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+ entry->req.iov_dif_cnt = 0;
+
+ /* Handle BIDI commands */
+- iov_cnt = 0;
+- alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+- se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
+- entry->req.iov_bidi_cnt = iov_cnt;
+-
++ if (se_cmd->se_cmd_flags & SCF_BIDI) {
++ iov_cnt = 0;
++ iov++;
++ alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
++ se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
++ false);
++ entry->req.iov_bidi_cnt = iov_cnt;
++ }
+ /* cmd's data_bitmap is what changed in process */
+ bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
+ DATA_BLOCK_BITS);
+@@ -582,19 +626,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
+ se_cmd->scsi_sense_length);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+-
+ /* Get Data-In buffer before clean up */
+- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+- gather_data_area(udev, bitmap,
+- se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
++ gather_data_area(udev, cmd, true);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+- DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+-
+- bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+- gather_data_area(udev, bitmap,
+- se_cmd->t_data_sg, se_cmd->t_data_nents);
++ gather_data_area(udev, cmd, false);
+ free_data_area(udev, cmd);
+ } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
+ free_data_area(udev, cmd);
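
The bidi comment in gather_data_area() above describes a small bitmap dance: the first DIV_ROUND_UP(data_length, DATA_BLOCK_SIZE) set bits belong to the Data-Out buffer and are popped off a local copy before the Data-In blocks are walked. The same step in a self-contained form, with a plain word standing in for the DATA_BLOCK_BITS bitmap:

    #include <stdio.h>

    int main(void)
    {
        unsigned long bitmap = 0x2f;       /* blocks 0,1,2,3,5 allocated    */
        unsigned int data_out_blocks = 2;  /* DIV_ROUND_UP(len, BLOCK_SIZE) */

        /* discard the Data-Out blocks, lowest set bit first */
        while (data_out_blocks--) {
            int block = __builtin_ctzl(bitmap);  /* find_first_bit() */
            bitmap &= ~(1UL << block);           /* clear_bit()      */
        }

        printf("data-in blocks: %#lx\n", bitmap);  /* 0x2c: blocks 2,3,5 */
        return 0;
    }
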
+diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
+index 37a37c4..6f2e729 100644
+--- a/drivers/video/fbdev/efifb.c
++++ b/drivers/video/fbdev/efifb.c
+@@ -10,6 +10,7 @@
+ #include <linux/efi.h>
+ #include <linux/errno.h>
+ #include <linux/fb.h>
++#include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/screen_info.h>
+ #include <video/vga.h>
+@@ -118,6 +119,8 @@ static inline bool fb_base_is_valid(void)
+ return false;
+ }
+
++static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
++
+ static int efifb_probe(struct platform_device *dev)
+ {
+ struct fb_info *info;
+@@ -127,7 +130,7 @@ static int efifb_probe(struct platform_device *dev)
+ unsigned int size_total;
+ char *option = NULL;
+
+- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
++ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
+ return -ENODEV;
+
+ if (fb_get_options("efifb", &option))
+@@ -327,3 +330,64 @@ static struct platform_driver efifb_driver = {
+ };
+
+ builtin_platform_driver(efifb_driver);
++
++#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
++
++static bool pci_bar_found; /* did we find a BAR matching the efifb base? */
++
++static void claim_efifb_bar(struct pci_dev *dev, int idx)
++{
++ u16 word;
++
++ pci_bar_found = true;
++
++ pci_read_config_word(dev, PCI_COMMAND, &word);
++ if (!(word & PCI_COMMAND_MEMORY)) {
++ pci_dev_disabled = true;
++ dev_err(&dev->dev,
++ "BAR %d: assigned to efifb but device is disabled!\n",
++ idx);
++ return;
++ }
++
++ if (pci_claim_resource(dev, idx)) {
++ pci_dev_disabled = true;
++ dev_err(&dev->dev,
++ "BAR %d: failed to claim resource for efifb!\n", idx);
++ return;
++ }
++
++ dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
++}
++
++static void efifb_fixup_resources(struct pci_dev *dev)
++{
++ u64 base = screen_info.lfb_base;
++ u64 size = screen_info.lfb_size;
++ int i;
++
++ if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
++ return;
++
++ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
++ base |= (u64)screen_info.ext_lfb_base << 32;
++
++ if (!base)
++ return;
++
++ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
++ struct resource *res = &dev->resource[i];
++
++ if (!(res->flags & IORESOURCE_MEM))
++ continue;
++
++ if (res->start <= base && res->end >= base + size - 1) {
++ claim_efifb_bar(dev, i);
++ break;
++ }
++ }
++}
++DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
++ 16, efifb_fixup_resources);
++
++#endif
+diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
+index 0567d51..ea2f19f 100644
+--- a/drivers/video/fbdev/xen-fbfront.c
++++ b/drivers/video/fbdev/xen-fbfront.c
+@@ -644,7 +644,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
+ break;
+
+ case XenbusStateInitWait:
+-InitWait:
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+@@ -655,7 +654,8 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
+ * get Connected twice here.
+ */
+ if (dev->state != XenbusStateConnected)
+- goto InitWait; /* no InitWait seen yet, fudge it */
++ /* no InitWait seen yet, fudge it */
++ xenbus_switch_state(dev, XenbusStateConnected);
+
+ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "request-update", "%d", &val) < 0)
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 1cd0e2e..3925758 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
+ wdata->credits = credits;
+
+ if (!wdata->cfile->invalidHandle ||
+- !cifs_reopen_file(wdata->cfile, false))
++ !(rc = cifs_reopen_file(wdata->cfile, false)))
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
+ if (rc) {
+@@ -3002,7 +3002,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
+ rdata->credits = credits;
+
+ if (!rdata->cfile->invalidHandle ||
+- !cifs_reopen_file(rdata->cfile, true))
++ !(rc = cifs_reopen_file(rdata->cfile, true)))
+ rc = server->ops->async_readv(rdata);
+ error:
+ if (rc) {
+@@ -3577,7 +3577,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ }
+
+ if (!rdata->cfile->invalidHandle ||
+- !cifs_reopen_file(rdata->cfile, true))
++ !(rc = cifs_reopen_file(rdata->cfile, true)))
+ rc = server->ops->async_readv(rdata);
+ if (rc) {
+ add_credits_and_wake_if(server, rdata->credits, 0);
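
All three cifs hunks above are the same fix: the result of cifs_reopen_file() used to be tested and then thrown away, so on a failed reopen rc kept its previous value (zero) and the error path below never ran. A minimal demonstration of why the assignment moves into the condition:

    #include <stdio.h>

    static int reopen(void) { return -11; /* say reopen fails: -EAGAIN */ }

    int main(void)
    {
        int invalid_handle = 1;
        int rc = 0;

        /* old form: reopen()'s error code is tested, then dropped */
        if (!invalid_handle || !reopen())
            ; /* would issue the request, setting rc */
        printf("old rc = %d\n", rc);   /* 0: the failure is invisible */

        /* new form: the condition also records the error */
        if (!invalid_handle || !(rc = reopen()))
            ; /* would issue the request, setting rc */
        printf("new rc = %d\n", rc);   /* -11: the error path can run */
        return 0;
    }
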
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index bdd3292..7080dac 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1987,6 +1987,9 @@ void smb2_reconnect_server(struct work_struct *work)
+ struct cifs_tcon *tcon, *tcon2;
+ struct list_head tmp_list;
+ int tcon_exist = false;
++ int rc;
++ int resched = false;
++
+
+ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+ mutex_lock(&server->reconnect_mutex);
+@@ -2014,13 +2017,18 @@ void smb2_reconnect_server(struct work_struct *work)
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+- if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
++ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
++ if (!rc)
+ cifs_reopen_persistent_handles(tcon);
++ else
++ resched = true;
+ list_del_init(&tcon->rlist);
+ cifs_put_tcon(tcon);
+ }
+
+ cifs_dbg(FYI, "Reconnecting tcons finished\n");
++ if (resched)
++ queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
+ mutex_unlock(&server->reconnect_mutex);
+
+ /* now we can safely release srv struct */
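
The resched flag above turns a one-shot reconnect into a retrying one: if any tcon fails to reconnect, the work re-queues itself. A kernel-context sketch of that self-rearming shape (the work body is elided, and system_wq stands in for cifsiod_wq):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void reconnect_fn(struct work_struct *work)
    {
        struct delayed_work *dwork = to_delayed_work(work);
        bool resched = false;

        /* ... try each connection, set resched = true on any failure ... */

        if (resched)  /* try again in two seconds */
            queue_delayed_work(system_wq, dwork, 2 * HZ);
    }
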
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index dc9d64a..c78fce4 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -71,10 +71,9 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum_size);
+ offset += csum_size;
+- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+- EXT4_INODE_SIZE(inode->i_sb) -
+- offset);
+ }
++ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
++ EXT4_INODE_SIZE(inode->i_sb) - offset);
+ }
+
+ return csum;
+diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
+index f419dd9..fe2cbeb 100644
+--- a/fs/orangefs/devorangefs-req.c
++++ b/fs/orangefs/devorangefs-req.c
+@@ -208,14 +208,19 @@ static ssize_t orangefs_devreq_read(struct file *file,
+ continue;
+ /*
+ * Skip ops whose filesystem we don't know about unless
+- * it is being mounted.
++ * it is being mounted or unmounted. It is possible for
++ * a filesystem we don't know about to be unmounted if
++ * it fails to mount in the kernel after userspace has
++ * been sent the mount request.
+ */
+ /* XXX: is there a better way to detect this? */
+ } else if (ret == -1 &&
+ !(op->upcall.type ==
+ ORANGEFS_VFS_OP_FS_MOUNT ||
+ op->upcall.type ==
+- ORANGEFS_VFS_OP_GETATTR)) {
++ ORANGEFS_VFS_OP_GETATTR ||
++ op->upcall.type ==
++ ORANGEFS_VFS_OP_FS_UMOUNT)) {
+ gossip_debug(GOSSIP_DEV_DEBUG,
+ "orangefs: skipping op tag %llu %s\n",
+ llu(op->tag), get_opname_string(op));
+diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
+index 3bf803d..45dd8f2 100644
+--- a/fs/orangefs/orangefs-kernel.h
++++ b/fs/orangefs/orangefs-kernel.h
+@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
+ char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
+ struct super_block *sb;
+ int mount_pending;
++ int no_list;
+ struct list_head list;
+ };
+
+diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
+index cd261c8..629d8c9 100644
+--- a/fs/orangefs/super.c
++++ b/fs/orangefs/super.c
+@@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
+
+ if (ret) {
+ d = ERR_PTR(ret);
+- goto free_op;
++ goto free_sb_and_op;
+ }
+
+ /*
+@@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
+ spin_unlock(&orangefs_superblocks_lock);
+ op_release(new_op);
+
++ /* Must be removed from the list now. */
++ ORANGEFS_SB(sb)->no_list = 0;
++
+ if (orangefs_userspace_version >= 20906) {
+ new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
+ if (!new_op)
+@@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
+
+ return dget(sb->s_root);
+
++free_sb_and_op:
++ /* Will call orangefs_kill_sb with sb not in list. */
++ ORANGEFS_SB(sb)->no_list = 1;
++ deactivate_locked_super(sb);
+ free_op:
+ gossip_err("orangefs_mount: mount request failed with %d\n", ret);
+ if (ret == -EINVAL) {
+@@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
+ */
+ orangefs_unmount_sb(sb);
+
+- /* remove the sb from our list of orangefs specific sb's */
+-
+- spin_lock(&orangefs_superblocks_lock);
+- __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */
+- ORANGEFS_SB(sb)->list.prev = NULL;
+- spin_unlock(&orangefs_superblocks_lock);
++ if (!ORANGEFS_SB(sb)->no_list) {
++ /* remove the sb from our list of orangefs specific sb's */
++ spin_lock(&orangefs_superblocks_lock);
++ /* not list_del_init */
++ __list_del_entry(&ORANGEFS_SB(sb)->list);
++ ORANGEFS_SB(sb)->list.prev = NULL;
++ spin_unlock(&orangefs_superblocks_lock);
++ }
+
+ /*
+ * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 35b92d8..b1517b6 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -899,7 +899,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
+ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+ {
+- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
++ pmd_t pmd = *pmdp;
++
++ /* See comment in change_huge_pmd() */
++ pmdp_invalidate(vma, addr, pmdp);
++ if (pmd_dirty(*pmdp))
++ pmd = pmd_mkdirty(pmd);
++ if (pmd_young(*pmdp))
++ pmd = pmd_mkyoung(pmd);
+
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_clear_soft_dirty(pmd);
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
+index 1d4f365..f6d9af3e 100644
+--- a/include/crypto/internal/hash.h
++++ b/include/crypto/internal/hash.h
+@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
+ return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
+ }
+
++static inline void ahash_request_complete(struct ahash_request *req, int err)
++{
++ req->base.complete(&req->base, err);
++}
++
++static inline u32 ahash_request_flags(struct ahash_request *req)
++{
++ return req->base.flags;
++}
++
+ static inline struct crypto_ahash *crypto_spawn_ahash(
+ struct crypto_ahash_spawn *spawn)
+ {
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index c83c23f..307ae63 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
+ pr_cont_kernfs_path(cgrp->kn);
+ }
+
++static inline void cgroup_init_kthreadd(void)
++{
++ /*
++ * kthreadd is inherited by all kthreads, keep it in the root so
++ * that the new kthreads are guaranteed to stay in the root until
++ * initialization is finished.
++ */
++ current->no_cgroup_migration = 1;
++}
++
++static inline void cgroup_kthread_ready(void)
++{
++ /*
++ * This kthread finished initialization. The creator should have
++ * set PF_NO_SETAFFINITY if this kthread should stay in the root.
++ */
++ current->no_cgroup_migration = 0;
++}
++
+ #else /* !CONFIG_CGROUPS */
+
+ struct cgroup_subsys_state;
+@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
+
+ static inline int cgroup_init_early(void) { return 0; }
+ static inline int cgroup_init(void) { return 0; }
++static inline void cgroup_init_kthreadd(void) {}
++static inline void cgroup_kthread_ready(void) {}
+
+ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
+ struct cgroup *ancestor)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 75d9a57..f425eb3 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1584,6 +1584,10 @@ struct task_struct {
+ #ifdef CONFIG_COMPAT_BRK
+ unsigned brk_randomized:1;
+ #endif
++#ifdef CONFIG_CGROUPS
++ /* disallow userland-initiated cgroup migration */
++ unsigned no_cgroup_migration:1;
++#endif
+
+ unsigned long atomic_flags; /* Flags needing atomic access. */
+
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 6e22b54..c146ebc 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -39,7 +39,10 @@ struct iov_iter {
+ };
+ union {
+ unsigned long nr_segs;
+- int idx;
++ struct {
++ int idx;
++ int start_idx;
++ };
+ };
+ };
+
+@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ struct iov_iter *i, unsigned long offset, size_t bytes);
+ void iov_iter_advance(struct iov_iter *i, size_t bytes);
++void iov_iter_revert(struct iov_iter *i, size_t bytes);
+ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+ size_t iov_iter_single_seg_count(const struct iov_iter *i);
+ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 6233e8f..0383c60 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -705,6 +705,7 @@ struct se_lun {
+ u64 unpacked_lun;
+ #define SE_LUN_LINK_MAGIC 0xffff7771
+ u32 lun_link_magic;
++ bool lun_shutdown;
+ bool lun_access_ro;
+ u32 lun_index;
+
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 4e2f3de..a3d2aad 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2920,11 +2920,12 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+ tsk = tsk->group_leader;
+
+ /*
+- * Workqueue threads may acquire PF_NO_SETAFFINITY and become
+- * trapped in a cpuset, or RT worker may be born in a cgroup
+- * with no rt_runtime allocated. Just say no.
++ * kthreads may acquire PF_NO_SETAFFINITY during initialization.
++ * If userland migrates such a kthread to a non-root cgroup, it can
++ * become trapped in a cpuset, or RT kthread may be born in a
++ * cgroup with no rt_runtime allocated. Just say no.
+ */
+- if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
++ if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL;
+ goto out_unlock_rcu;
+ }
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index be2cc1f..c2c911a 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -18,6 +18,7 @@
+ #include <linux/freezer.h>
+ #include <linux/ptrace.h>
+ #include <linux/uaccess.h>
++#include <linux/cgroup.h>
+ #include <trace/events/sched.h>
+
+ static DEFINE_SPINLOCK(kthread_create_lock);
+@@ -205,6 +206,7 @@ static int kthread(void *_create)
+ ret = -EINTR;
+
+ if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
++ cgroup_kthread_ready();
+ __kthread_parkme(&self);
+ ret = threadfn(data);
+ }
+@@ -530,6 +532,7 @@ int kthreadd(void *unused)
+ set_mems_allowed(node_states[N_MEMORY]);
+
+ current->flags |= PF_NOFREEZE;
++ cgroup_init_kthreadd();
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index da87b3c..221eb59 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3736,23 +3736,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
+ ftrace_probe_registered = 1;
+ }
+
+-static void __disable_ftrace_function_probe(void)
++static bool __disable_ftrace_function_probe(void)
+ {
+ int i;
+
+ if (!ftrace_probe_registered)
+- return;
++ return false;
+
+ for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
+ struct hlist_head *hhd = &ftrace_func_hash[i];
+ if (hhd->first)
+- return;
++ return false;
+ }
+
+ /* no more funcs left */
+ ftrace_shutdown(&trace_probe_ops, 0);
+
+ ftrace_probe_registered = 0;
++ return true;
+ }
+
+
+@@ -3882,6 +3883,7 @@ static void
+ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ void *data, int flags)
+ {
++ struct ftrace_ops_hash old_hash_ops;
+ struct ftrace_func_entry *rec_entry;
+ struct ftrace_func_probe *entry;
+ struct ftrace_func_probe *p;
+@@ -3893,6 +3895,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ struct hlist_node *tmp;
+ char str[KSYM_SYMBOL_LEN];
+ int i, ret;
++ bool disabled;
+
+ if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
+ func_g.search = NULL;
+@@ -3911,6 +3914,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+
+ mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+
++ old_hash_ops.filter_hash = old_hash;
++ /* Probes only have filters */
++ old_hash_ops.notrace_hash = NULL;
++
+ hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+ if (!hash)
+ /* Hmm, should report this somehow */
+@@ -3948,12 +3955,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
+ }
+ }
+ mutex_lock(&ftrace_lock);
+- __disable_ftrace_function_probe();
++ disabled = __disable_ftrace_function_probe();
+ /*
+ * Remove after the disable is called. Otherwise, if the last
+ * probe is removed, a null hash means *all enabled*.
+ */
+ ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
++
++ /* still need to update the function call sites */
++ if (ftrace_enabled && !disabled)
++ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
++ &old_hash_ops);
+ synchronize_sched();
+ if (!ret)
+ free_ftrace_hash_rcu(old_hash);
+@@ -5389,6 +5401,15 @@ static void clear_ftrace_pids(struct trace_array *tr)
+ trace_free_pid_list(pid_list);
+ }
+
++void ftrace_clear_pids(struct trace_array *tr)
++{
++ mutex_lock(&ftrace_lock);
++
++ clear_ftrace_pids(tr);
++
++ mutex_unlock(&ftrace_lock);
++}
++
+ static void ftrace_pid_reset(struct trace_array *tr)
+ {
+ mutex_lock(&ftrace_lock);
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 90b66ed..862bc88 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -7150,6 +7150,7 @@ static int instance_rmdir(const char *name)
+
+ tracing_set_nop(tr);
+ event_trace_del_tracer(tr);
++ ftrace_clear_pids(tr);
+ ftrace_destroy_function_files(tr);
+ tracefs_remove_recursive(tr->dir);
+ free_trace_buffers(tr);
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index fd24b1f..b0d8576 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -870,6 +870,7 @@ int using_ftrace_ops_list_func(void);
+ void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
+ void ftrace_init_tracefs_toplevel(struct trace_array *tr,
+ struct dentry *d_tracer);
++void ftrace_clear_pids(struct trace_array *tr);
+ #else
+ static inline int ftrace_trace_task(struct trace_array *tr)
+ {
+@@ -888,6 +889,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { }
+ static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
+ static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
+ static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
++static inline void ftrace_clear_pids(struct trace_array *tr) { }
+ /* ftace_func_t type is not defined, use macro instead of static inline */
+ #define ftrace_init_array_ops(tr, func) do { } while (0)
+ #endif /* CONFIG_FUNCTION_TRACER */
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index efb0b4d..a75ea63 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -734,6 +734,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
+ }
+ EXPORT_SYMBOL(iov_iter_advance);
+
++void iov_iter_revert(struct iov_iter *i, size_t unroll)
++{
++ if (!unroll)
++ return;
++ i->count += unroll;
++ if (unlikely(i->type & ITER_PIPE)) {
++ struct pipe_inode_info *pipe = i->pipe;
++ int idx = i->idx;
++ size_t off = i->iov_offset;
++ while (1) {
++ size_t n = off - pipe->bufs[idx].offset;
++ if (unroll < n) {
++ off -= (n - unroll);
++ break;
++ }
++ unroll -= n;
++ if (!unroll && idx == i->start_idx) {
++ off = 0;
++ break;
++ }
++ if (!idx--)
++ idx = pipe->buffers - 1;
++ off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
++ }
++ i->iov_offset = off;
++ i->idx = idx;
++ pipe_truncate(i);
++ return;
++ }
++ if (unroll <= i->iov_offset) {
++ i->iov_offset -= unroll;
++ return;
++ }
++ unroll -= i->iov_offset;
++ if (i->type & ITER_BVEC) {
++ const struct bio_vec *bvec = i->bvec;
++ while (1) {
++ size_t n = (--bvec)->bv_len;
++ i->nr_segs++;
++ if (unroll <= n) {
++ i->bvec = bvec;
++ i->iov_offset = n - unroll;
++ return;
++ }
++ unroll -= n;
++ }
++ } else { /* same logics for iovec and kvec */
++ const struct iovec *iov = i->iov;
++ while (1) {
++ size_t n = (--iov)->iov_len;
++ i->nr_segs++;
++ if (unroll <= n) {
++ i->iov = iov;
++ i->iov_offset = n - unroll;
++ return;
++ }
++ unroll -= n;
++ }
++ }
++}
++EXPORT_SYMBOL(iov_iter_revert);
++
+ /*
+ * Return the count of just the current iov_iter segment.
+ */
+@@ -787,6 +849,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
+ i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
+ i->iov_offset = 0;
+ i->count = count;
++ i->start_idx = i->idx;
+ }
+ EXPORT_SYMBOL(iov_iter_pipe);
+
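
The new iov_iter_revert() above gives callers that advance an iterator piecewise a way to unwind on failure; the net/core/datagram.c hunks below use it exactly that way. A kernel-context sketch of the all-or-nothing pattern (the wrapper is illustrative, not an existing helper):

    #include <linux/uio.h>
    #include <linux/errno.h>

    static int copy_all_or_nothing(const void *buf, size_t len,
                                   struct iov_iter *to)
    {
        size_t done = copy_to_iter(buf, len, to);  /* may stop short */

        if (done != len) {
            iov_iter_revert(to, done);  /* rewind the partial progress */
            return -EFAULT;
        }
        return 0;
    }
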
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 917555c..d5b2b75 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1380,8 +1380,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ deactivate_page(page);
+
+ if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
+- orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
+- tlb->fullmm);
++ pmdp_invalidate(vma, addr, pmd);
+ orig_pmd = pmd_mkold(orig_pmd);
+ orig_pmd = pmd_mkclean(orig_pmd);
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 0de2669..47559cc 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2152,6 +2152,8 @@ struct memcg_kmem_cache_create_work {
+ struct work_struct work;
+ };
+
++static struct workqueue_struct *memcg_kmem_cache_create_wq;
++
+ static void memcg_kmem_cache_create_func(struct work_struct *w)
+ {
+ struct memcg_kmem_cache_create_work *cw =
+@@ -2183,7 +2185,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+ cw->cachep = cachep;
+ INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
+
+- schedule_work(&cw->work);
++ queue_work(memcg_kmem_cache_create_wq, &cw->work);
+ }
+
+ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+@@ -5786,6 +5788,17 @@ static int __init mem_cgroup_init(void)
+ {
+ int cpu, node;
+
++#ifndef CONFIG_SLOB
++ /*
++ * Kmem cache creation is mostly done with the slab_mutex held,
++ * so use a special workqueue to avoid stalling all worker
++ * threads in case lots of cgroups are created simultaneously.
++ */
++ memcg_kmem_cache_create_wq =
++ alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
++ BUG_ON(!memcg_kmem_cache_create_wq);
++#endif
++
+ hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+
+ for_each_possible_cpu(cpu)
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index b0bc023..1689bb5 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -280,7 +280,7 @@ struct zs_pool {
+ struct zspage {
+ struct {
+ unsigned int fullness:FULLNESS_BITS;
+- unsigned int class:CLASS_BITS;
++ unsigned int class:CLASS_BITS + 1;
+ unsigned int isolated:ISOLATED_BITS;
+ unsigned int magic:MAGIC_VAL_BITS;
+ };
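
The zsmalloc change above is a bitfield-width fix: when a stored value needs one more bit than the field declares, C silently truncates it. A short demonstration of the failure mode (the widths here are illustrative, not zsmalloc's actual layout):

    #include <stdio.h>

    struct narrow { unsigned int class : 8; };   /* CLASS_BITS     */
    struct wide   { unsigned int class : 9; };   /* CLASS_BITS + 1 */

    int main(void)
    {
        unsigned int v = 256;  /* needs 9 bits */
        struct narrow n = { 0 };
        struct wide w = { 0 };

        n.class = v;
        w.class = v;
        printf("8-bit field: %u, 9-bit field: %u\n", n.class, w.class);
        /* prints: 8-bit field: 0, 9-bit field: 256 */
        return 0;
    }
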
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index b7de71f..963732e 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -378,7 +378,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len)
+ {
+ int start = skb_headlen(skb);
+- int i, copy = start - offset;
++ int i, copy = start - offset, start_off = offset, n;
+ struct sk_buff *frag_iter;
+
+ trace_skb_copy_datagram_iovec(skb, len);
+@@ -387,11 +387,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+- if (copy_to_iter(skb->data + offset, copy, to) != copy)
++ n = copy_to_iter(skb->data + offset, copy, to);
++ offset += n;
++ if (n != copy)
+ goto short_copy;
+ if ((len -= copy) == 0)
+ return 0;
+- offset += copy;
+ }
+
+ /* Copy paged appendix. Hmm... why does this look so complicated? */
+@@ -405,13 +406,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+- if (copy_page_to_iter(skb_frag_page(frag),
++ n = copy_page_to_iter(skb_frag_page(frag),
+ frag->page_offset + offset -
+- start, copy, to) != copy)
++ start, copy, to);
++ offset += n;
++ if (n != copy)
+ goto short_copy;
+ if (!(len -= copy))
+ return 0;
+- offset += copy;
+ }
+ start = end;
+ }
+@@ -443,6 +445,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ */
+
+ fault:
++ iov_iter_revert(to, offset - start_off);
+ return -EFAULT;
+
+ short_copy:
+@@ -593,7 +596,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ __wsum *csump)
+ {
+ int start = skb_headlen(skb);
+- int i, copy = start - offset;
++ int i, copy = start - offset, start_off = offset;
+ struct sk_buff *frag_iter;
+ int pos = 0;
+ int n;
+@@ -603,11 +606,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ if (copy > len)
+ copy = len;
+ n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
++ offset += n;
+ if (n != copy)
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+- offset += copy;
+ pos = copy;
+ }
+
+@@ -629,12 +632,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ offset - start, copy,
+ &csum2, to);
+ kunmap(page);
++ offset += n;
+ if (n != copy)
+ goto fault;
+ *csump = csum_block_add(*csump, csum2, pos);
+ if (!(len -= copy))
+ return 0;
+- offset += copy;
+ pos += copy;
+ }
+ start = end;
+@@ -667,6 +670,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
+ return 0;
+
+ fault:
++ iov_iter_revert(to, offset - start_off);
+ return -EFAULT;
+ }
+
+@@ -751,6 +755,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
+ }
+ return 0;
+ csum_error:
++ iov_iter_revert(&msg->msg_iter, chunk);
+ return -EINVAL;
+ fault:
+ return -EFAULT;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index bff4460..8d6c09f 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2166,6 +2166,8 @@ static int ip6_route_del(struct fib6_config *cfg)
+ continue;
+ if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
+ continue;
++ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
++ continue;
+ dst_hold(&rt->dst);
+ read_unlock_bh(&table->tb6_lock);
+
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 6cbe5bd..6734420 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4735,6 +4735,12 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ if (!asoc)
+ return -EINVAL;
+
++ /* If there is a thread waiting on more sndbuf space for
++ * sending on this asoc, it cannot be peeled.
++ */
++ if (waitqueue_active(&asoc->wait))
++ return -EBUSY;
++
+ /* An association cannot be branched off from an already peeled-off
+ * socket, nor is this supported for tcp style sockets.
+ */
+@@ -7427,8 +7433,6 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ */
+ release_sock(sk);
+ current_timeo = schedule_timeout(current_timeo);
+- if (sk != asoc->base.sk)
+- goto do_error;
+ lock_sock(sk);
+
+ *timeo_p = current_timeo;
+diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
+index fd5d1e0..e18fe9d 100644
+--- a/sound/soc/intel/Kconfig
++++ b/sound/soc/intel/Kconfig
+@@ -33,11 +33,9 @@ config SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_MATCH if ACPI
+ depends on (X86 || COMPILE_TEST)
+
+-# firmware stuff depends DW_DMAC_CORE; since there is no depends-on from
+-# the reverse selection, each machine driver needs to select
+-# SND_SOC_INTEL_SST_FIRMWARE carefully depending on DW_DMAC_CORE
+ config SND_SOC_INTEL_SST_FIRMWARE
+ tristate
++ select DW_DMAC_CORE
+
+ config SND_SOC_INTEL_SST_ACPI
+ tristate
+@@ -47,16 +45,18 @@ config SND_SOC_INTEL_SST_MATCH
+
+ config SND_SOC_INTEL_HASWELL
+ tristate
++ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SST_FIRMWARE
+
+ config SND_SOC_INTEL_BAYTRAIL
+ tristate
++ select SND_SOC_INTEL_SST
++ select SND_SOC_INTEL_SST_FIRMWARE
+
+ config SND_SOC_INTEL_HASWELL_MACH
+ tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
+ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+- depends on DW_DMAC_CORE
+- select SND_SOC_INTEL_SST
++ depends on DMADEVICES
+ select SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT5640
+ help
+@@ -99,9 +99,8 @@ config SND_SOC_INTEL_BXT_RT298_MACH
+ config SND_SOC_INTEL_BYT_RT5640_MACH
+ tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
+ depends on X86_INTEL_LPSS && I2C
+- depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
+- select SND_SOC_INTEL_SST
+- select SND_SOC_INTEL_SST_FIRMWARE
++ depends on DMADEVICES
++ depends on SND_SST_IPC_ACPI = n
+ select SND_SOC_INTEL_BAYTRAIL
+ select SND_SOC_RT5640
+ help
+@@ -112,9 +111,8 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
+ config SND_SOC_INTEL_BYT_MAX98090_MACH
+ tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
+ depends on X86_INTEL_LPSS && I2C
+- depends on DW_DMAC_CORE && (SND_SST_IPC_ACPI = n)
+- select SND_SOC_INTEL_SST
+- select SND_SOC_INTEL_SST_FIRMWARE
++ depends on DMADEVICES
++ depends on SND_SST_IPC_ACPI = n
+ select SND_SOC_INTEL_BAYTRAIL
+ select SND_SOC_MAX98090
+ help
+@@ -123,9 +121,8 @@ config SND_SOC_INTEL_BYT_MAX98090_MACH
+
+ config SND_SOC_INTEL_BDW_RT5677_MACH
+ tristate "ASoC Audio driver for Intel Broadwell with RT5677 codec"
+- depends on X86_INTEL_LPSS && GPIOLIB && I2C && DW_DMAC
+- depends on DW_DMAC_CORE=y
+- select SND_SOC_INTEL_SST
++ depends on X86_INTEL_LPSS && GPIOLIB && I2C
++ depends on DMADEVICES
+ select SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT5677
+ help
+@@ -134,10 +131,8 @@ config SND_SOC_INTEL_BDW_RT5677_MACH
+
+ config SND_SOC_INTEL_BROADWELL_MACH
+ tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
+- depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
+- I2C_DESIGNWARE_PLATFORM
+- depends on DW_DMAC_CORE
+- select SND_SOC_INTEL_SST
++ depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
++ depends on DMADEVICES
+ select SND_SOC_INTEL_HASWELL
+ select SND_SOC_RT286
+ help
diff --git a/4.9.24/4420_grsecurity-3.1-4.9.24-201704220732.patch b/4.9.24/4420_grsecurity-3.1-4.9.24-201704220732.patch
new file mode 100644
index 0000000..8f3058d
--- /dev/null
+++ b/4.9.24/4420_grsecurity-3.1-4.9.24-201704220732.patch
@@ -0,0 +1,225996 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index 5385cba..607c6a0 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -7,6 +7,7 @@
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -16,6 +17,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -52,14 +54,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -73,9 +78,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -84,6 +91,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -96,11 +104,14 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -109,19 +120,23 @@ config-*
+ config_data.h*
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+ crc32table.h*
+ cscope.*
+ defkeymap.c
++devicetable-offsets.h
+ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -129,12 +144,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -149,14 +167,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -167,12 +185,14 @@ machtypes.h
+ map
+ map_hugetlb
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -188,6 +208,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -197,6 +219,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -206,7 +229,12 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++randomize_layout_hash.h
++randomize_layout_seed.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -216,8 +244,17 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++aux.h
++disable.h
++e_fields.h
++e_fns.h
++e_fptrs.h
++e_vars.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -227,6 +264,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -238,13 +276,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -252,9 +294,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index 9b9c479..5a635ff 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
+ === 4 Host Program support
+ --- 4.1 Simple Host Program
+ --- 4.2 Composite Host Programs
+- --- 4.3 Using C++ for host programs
+- --- 4.4 Controlling compiler options for host programs
+- --- 4.5 When host programs are actually built
+- --- 4.6 Using hostprogs-$(CONFIG_FOO)
++ --- 4.3 Defining shared libraries
++ --- 4.4 Using C++ for host programs
++ --- 4.5 Controlling compiler options for host programs
++ --- 4.6 When host programs are actually built
++ --- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ === 5 Kbuild clean infrastructure
+
+@@ -645,7 +646,29 @@ Both possibilities are described in the following.
+ Finally, the two .o files are linked to the executable, lxdialog.
+ Note: The syntax <executable>-y is not permitted for host-programs.
+
+---- 4.3 Using C++ for host programs
++--- 4.3 Defining shared libraries
++
++ Objects with extension .so are considered shared libraries, and
++ will be compiled as position independent objects.
++ Kbuild provides support for shared libraries, but the usage
++ shall be restricted.
++ In the following example the libkconfig.so shared library is used
++ to link the executable conf.
++
++ Example:
++ #scripts/kconfig/Makefile
++ hostprogs-y := conf
++ conf-objs := conf.o libkconfig.so
++ libkconfig-objs := expr.o type.o
++
++ Shared libraries always require a corresponding -objs line, and
++	in the example above the shared library libkconfig is composed of
++ the two objects expr.o and type.o.
++ expr.o and type.o will be built as position independent code and
++ linked as a shared library libkconfig.so. C++ is not supported for
++ shared libraries.
++
++--- 4.4 Using C++ for host programs
+
+ kbuild offers support for host programs written in C++. This was
+ introduced solely to support kconfig, and is not recommended
+@@ -668,7 +691,7 @@ Both possibilities are described in the following.
+ qconf-cxxobjs := qconf.o
+ qconf-objs := check.o
+
+---- 4.4 Controlling compiler options for host programs
++--- 4.5 Controlling compiler options for host programs
+
+ When compiling host programs, it is possible to set specific flags.
+ The programs will always be compiled utilising $(HOSTCC) passed
+@@ -696,7 +719,7 @@ Both possibilities are described in the following.
+ When linking qconf, it will be passed the extra option
+ "-L$(QTDIR)/lib".
+
+---- 4.5 When host programs are actually built
++--- 4.6 When host programs are actually built
+
+ Kbuild will only build host-programs when they are referenced
+ as a prerequisite.
+@@ -727,7 +750,7 @@ Both possibilities are described in the following.
+ This will tell kbuild to build lxdialog even if not referenced in
+ any rule.
+
+---- 4.6 Using hostprogs-$(CONFIG_FOO)
++--- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ A typical pattern in a Kbuild file looks like this:
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index a6fadef..fdf6042 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1436,6 +1436,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ [KNL] Should the hard-lockup detector generate
+ backtraces on all cpus.
+ Format: <integer>
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
++ grsec_sysfs_restrict= Format: 0 | 1
++ Default: 1
++ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
+
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+ are distributed across NUMA nodes. Defaults on
+@@ -2665,6 +2671,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ noexec=on: enable non-executable mappings (default)
+ noexec=off: disable non-executable mappings
+
++ nopcid [X86-64]
++ Disable PCID (Process-Context IDentifier) even if it
++ is supported by the processor.
++
+ nosmap [X86]
+ Disable SMAP (Supervisor Mode Access Prevention)
+ even if it is supported by processor.
+@@ -2973,6 +2983,35 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_sanitize_slab=
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility, 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
++ pax_extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
++ pax_size_overflow_report_only
++ Enables rate-limited logging of size_overflow plugin
++ violations while disabling killing of the violating
++ task.
++
++ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
++ when the processor supports PCID.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index ffab8b5..b8fcd61 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -42,6 +42,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id [ sysv ipc ]
+@@ -409,6 +410,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
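
On kernels that carry this sysctl, the toggle can also be flipped at runtime
through procfs. A minimal sketch in C (requires root; path as documented
above):

	/* disable_modify_ldt.c -- write 0 to /proc/sys/kernel/modify_ldt */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/modify_ldt", "w");

		if (!f) {
			perror("/proc/sys/kernel/modify_ldt");
			return 1;
		}
		fputs("0\n", f);	/* 0 = disable, 1 = enable */
		return fclose(f) != 0;
	}
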
+diff --git a/Kbuild b/Kbuild
+index 3d0ae15..84e5412 100644
+--- a/Kbuild
++++ b/Kbuild
+@@ -91,6 +91,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
+ always += missing-syscalls
+ targets += missing-syscalls
+
++GCC_PLUGINS_missing-syscalls := n
+ quiet_cmd_syscalls = CALL $<
+ cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)
+
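
GCC_PLUGINS_missing-syscalls := n opts a single target out of gcc-plugin
instrumentation; a later hunk in this patch uses the directory-wide form for
the ARM decompressor. As a hedged sketch of the two spellings (the first
target name is hypothetical):

	GCC_PLUGINS_asm-offsets := n	# skip plugins for one target
	GCC_PLUGINS := n		# skip plugins for the whole Makefile
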
+diff --git a/Makefile b/Makefile
+index 50436f5..435d355 100644
+--- a/Makefile
++++ b/Makefile
+@@ -302,7 +302,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ HOSTCC = gcc
+ HOSTCXX = g++
+ HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS += -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
+
+ ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+ HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
+@@ -731,7 +733,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+ else
+ KBUILD_CFLAGS += -g
+ endif
+-KBUILD_AFLAGS += -Wa,-gdwarf-2
++KBUILD_AFLAGS += -Wa,--gdwarf-2
+ endif
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
+@@ -910,7 +912,7 @@ export mod_sign_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -1274,7 +1276,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+ signing_key.pem signing_key.priv signing_key.x509 \
+ x509.genkey extra_certificates signing_key.x509.keyid \
+- signing_key.x509.signer vmlinux-gdb.py
++ signing_key.x509.signer vmlinux-gdb.py \
++ scripts/gcc-plugins/size_overflow_plugin/e_*.h \
++ scripts/gcc-plugins/size_overflow_plugin/disable.h \
++ scripts/gcc-plugins/randomize_layout_seed.h
+
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1314,7 +1319,7 @@ distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
++ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index 659bdd0..4179181 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -164,6 +164,7 @@ config ARCH_USE_BUILTIN_BSWAP
+ config KRETPROBES
+ def_bool y
+ depends on KPROBES && HAVE_KRETPROBES
++ depends on !PAX_RAP
+
+ config USER_RETURN_NOTIFIER
+ bool
+@@ -355,7 +356,7 @@ config HAVE_GCC_PLUGINS
+ menuconfig GCC_PLUGINS
+ bool "GCC plugins"
+ depends on HAVE_GCC_PLUGINS
+- depends on !COMPILE_TEST
++ default y
+ help
+ GCC plugins are loadable modules that provide extra features to the
+ compiler. They are useful for runtime instrumentation and static analysis.
+@@ -759,6 +760,7 @@ config VMAP_STACK
+ default y
+ bool "Use a virtually-mapped stack"
+ depends on HAVE_ARCH_VMAP_STACK && !KASAN
++ depends on !GRKERNSEC_KSTACKOVERFLOW
+ ---help---
+ Enable this if you want the use of virtually-mapped kernel stacks
+ with guard pages. This causes kernel stack overflows to be
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 498933a..78d2b22 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -308,4 +308,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ALPHA_ATOMIC_H */
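
These one-to-one aliases exist because alpha has no PAX_REFCOUNT
implementation: on architectures that do, the plain operations trap on signed
overflow while the _unchecked variants keep ordinary wrapping semantics. A
kernel-context sketch of the intended call-site split (structure and field
names invented for illustration):

	atomic_inc(&obj->refcount);		/* must never overflow: checked */
	atomic64_inc_unchecked(&st->rx_bytes);	/* may wrap harmlessly: unchecked */
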
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
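
Folding both variants into (_AC(1,UL) << L1_CACHE_SHIFT) keeps the constant
usable from assembly as well as C; linux/const.h arranges, roughly, the
following (abbreviated):

	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X	/* assembler sees a bare constant */
	#else
	#define _AC(X,Y)	(X##Y)	/* C sees a typed constant, e.g. 1UL */
	#endif

so L1_CACHE_BYTES evaluates to (1UL << 6) in C and to (1 << 6) in .S files on
EV6.
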
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a means for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index c2ebb6f..93a0613 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index a9a1195..e9b8417 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 936bc8f..bb1859f 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
++ gp = (u64)me->core_layout.base_rw + me->core_layout.size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index ffb93f49..ced8233 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1300,10 +1300,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ info.flags = 0;
+ info.length = len;
+@@ -1311,6 +1312,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ info.high_limit = limit;
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -1343,20 +1345,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
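
For a sense of the scale involved: assuming PaX's usual convention that the
PAX_DELTA_* values added to elf.h above count random bits of page offset
(shifted by PAGE_SHIFT, 13 with alpha's 8 KB pages), the 64-bit personality
gets

	mmap  base: 2^28 pages * 2^13 bytes/page = 2^41 bytes, about 2 TiB of spread
	stack base: 2^19 pages * 2^13 bytes/page = 2^32 bytes, i.e. 4 GiB of spread
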
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 83e9eee..db02682 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -132,8 +250,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
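
Decoding the masks in pax_handle_fetch_fault() above, patched-PLT emulation #1
matches the canonical three-instruction Alpha PLT stub (a hedged reading; xxxx
stands for the 16-bit displacement fields):

	ldah	$27, hi($27)	; 0x277Bxxxx
	ldq	$27, lo($27)	; 0xA77Bxxxx
	jmp	$31, ($27)	; 0x6BFB0000

i.e. form the GOT-slot address in $27, load the target through it and jump;
on a PAGEEXEC fault the handler performs that dereference and control
transfer itself instead of letting the stub execute from a non-executable
page.
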
+diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
+index 42b0504..6013221 100644
+--- a/arch/arc/kernel/kprobes.c
++++ b/arch/arc/kernel/kprobes.c
+@@ -424,6 +424,7 @@ static void __used kretprobe_trampoline_holder(void)
+ "kretprobe_trampoline:\n" "nop\n");
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -433,6 +434,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+@@ -509,6 +511,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+@@ -516,6 +519,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
+ {
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index b5d529f..0bb4d4f 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1622,6 +1622,7 @@ config AEABI
+ config OABI_COMPAT
+ bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
+ depends on AEABI && !THUMB2_KERNEL
++ depends on !GRKERNSEC
+ help
+ This option preserves the old syscall interface along with the
+ new (ARM EABI) one. It also provides a compatibility layer to
+@@ -1690,6 +1691,7 @@ config HIGHPTE
+ config CPU_SW_DOMAIN_PAN
+ bool "Enable use of CPU domains to implement privileged no-access"
+ depends on MMU && !ARM_LPAE
++ depends on !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ default y
+ help
+ Increase kernel security by ensuring that normal kernel accesses
+@@ -1766,7 +1768,7 @@ config ALIGNMENT_TRAP
+
+ config UACCESS_WITH_MEMCPY
+ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+- depends on MMU
++ depends on MMU && !PAX_MEMORY_UDEREF
+ default y if CPU_FEROCEON
+ help
+ Implement faster copy_to_user and clear_user methods for CPU
+@@ -2021,6 +2023,7 @@ config KEXEC
+ depends on (!SMP || PM_SLEEP_SMP)
+ depends on !CPU_V7M
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -2065,7 +2068,7 @@ config EFI_STUB
+
+ config EFI
+ bool "UEFI runtime support"
+- depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
++ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL && !PAX_KERNEXEC
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_STUB
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index d83f7c3..a6aba4c 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -7,6 +7,7 @@ config ARM_PTDUMP
+ depends on DEBUG_KERNEL
+ depends on MMU
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
+index d50430c..39509a6 100644
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -24,6 +24,8 @@ endif
+
+ GCOV_PROFILE := n
+
++GCC_PLUGINS := n
++
+ #
+ # Architecture dependencies
+ #
+diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
+index 6fc73bf..d0af3c7b 100644
+--- a/arch/arm/crypto/sha1_glue.c
++++ b/arch/arm/crypto/sha1_glue.c
+@@ -27,8 +27,8 @@
+
+ #include "sha1.h"
+
+-asmlinkage void sha1_block_data_order(u32 *digest,
+- const unsigned char *data, unsigned int rounds);
++asmlinkage void sha1_block_data_order(struct sha1_state *digest,
++ const u8 *data, int rounds);
+
+ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -36,22 +36,20 @@ int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+ /* make sure casting to sha1_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha1_state, state) != 0);
+
+- return sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_block_data_order);
++ return sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ }
+ EXPORT_SYMBOL_GPL(sha1_update_arm);
+
+ static int sha1_final(struct shash_desc *desc, u8 *out)
+ {
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order);
++ sha1_base_do_finalize(desc, sha1_block_data_order);
+ return sha1_base_finish(desc, out);
+ }
+
+ int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_block_data_order);
++ sha1_base_do_update(desc, data, len, sha1_block_data_order);
+ return sha1_final(desc, out);
+ }
+ EXPORT_SYMBOL_GPL(sha1_finup_arm);
+diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
+index 4e22f12..49902aa 100644
+--- a/arch/arm/crypto/sha1_neon_glue.c
++++ b/arch/arm/crypto/sha1_neon_glue.c
+@@ -31,8 +31,8 @@
+
+ #include "sha1.h"
+
+-asmlinkage void sha1_transform_neon(void *state_h, const char *data,
+- unsigned int rounds);
++asmlinkage void sha1_transform_neon(struct sha1_state *state_h, const u8 *data,
++ int rounds);
+
+ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -45,7 +45,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_transform_neon);
++ sha1_transform_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -60,8 +60,8 @@ static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
+ kernel_neon_begin();
+ if (len)
+ sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_transform_neon);
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon);
++ sha1_transform_neon);
++ sha1_base_do_finalize(desc, sha1_transform_neon);
+ kernel_neon_end();
+
+ return sha1_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
+index a84e869..53a0c61 100644
+--- a/arch/arm/crypto/sha256_glue.c
++++ b/arch/arm/crypto/sha256_glue.c
+@@ -30,8 +30,8 @@
+
+ #include "sha256_glue.h"
+
+-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
+- unsigned int num_blks);
++asmlinkage void sha256_block_data_order(struct sha256_state *digest, const u8 *data,
++ int num_blks);
+
+ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -39,23 +39,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
+ /* make sure casting to sha256_block_fn() is safe */
+ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
+
+- return sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_update);
+
+ static int sha256_final(struct shash_desc *desc, u8 *out)
+ {
+- sha256_base_do_finalize(desc,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_finalize(desc, sha256_block_data_order);
+ return sha256_base_finish(desc, out);
+ }
+
+ int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order);
+ return sha256_final(desc, out);
+ }
+ EXPORT_SYMBOL(crypto_sha256_arm_finup);
+diff --git a/arch/arm/crypto/sha256_neon_glue.c b/arch/arm/crypto/sha256_neon_glue.c
+index 39ccd65..f9511cb 100644
+--- a/arch/arm/crypto/sha256_neon_glue.c
++++ b/arch/arm/crypto/sha256_neon_glue.c
+@@ -26,8 +26,8 @@
+
+ #include "sha256_glue.h"
+
+-asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
+- unsigned int num_blks);
++asmlinkage void sha256_block_data_order_neon(struct sha256_state *digest, const u8 *data,
++ int num_blks);
+
+ static int sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+@@ -39,8 +39,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
+ return crypto_sha256_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order_neon);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -54,10 +53,8 @@ static int sha256_finup(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ if (len)
+- sha256_base_do_update(desc, data, len,
+- (sha256_block_fn *)sha256_block_data_order_neon);
+- sha256_base_do_finalize(desc,
+- (sha256_block_fn *)sha256_block_data_order_neon);
++ sha256_base_do_update(desc, data, len, sha256_block_data_order_neon);
++ sha256_base_do_finalize(desc, sha256_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha256_base_finish(desc, out);
+diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
+index 269a394..c7a91f1 100644
+--- a/arch/arm/crypto/sha512-glue.c
++++ b/arch/arm/crypto/sha512-glue.c
+@@ -28,27 +28,24 @@ MODULE_ALIAS_CRYPTO("sha512");
+ MODULE_ALIAS_CRYPTO("sha384-arm");
+ MODULE_ALIAS_CRYPTO("sha512-arm");
+
+-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
++asmlinkage void sha512_block_data_order(struct sha512_state *state, u8 const *src, int blocks);
+
+ int sha512_arm_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+ {
+- return sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ }
+
+ int sha512_arm_final(struct shash_desc *desc, u8 *out)
+ {
+- sha512_base_do_finalize(desc,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_finalize(desc, sha512_block_data_order);
+ return sha512_base_finish(desc, out);
+ }
+
+ int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+ {
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order);
+ return sha512_arm_final(desc, out);
+ }
+
+diff --git a/arch/arm/crypto/sha512-neon-glue.c b/arch/arm/crypto/sha512-neon-glue.c
+index 3269368..9fcbc00 100644
+--- a/arch/arm/crypto/sha512-neon-glue.c
++++ b/arch/arm/crypto/sha512-neon-glue.c
+@@ -22,7 +22,7 @@
+ MODULE_ALIAS_CRYPTO("sha384-neon");
+ MODULE_ALIAS_CRYPTO("sha512-neon");
+
+-asmlinkage void sha512_block_data_order_neon(u64 *state, u8 const *src,
++asmlinkage void sha512_block_data_order_neon(struct sha512_state *state, u8 const *src,
+ int blocks);
+
+ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+@@ -35,8 +35,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+ return sha512_arm_update(desc, data, len);
+
+ kernel_neon_begin();
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order_neon);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return 0;
+@@ -50,10 +49,8 @@ static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
+
+ kernel_neon_begin();
+ if (len)
+- sha512_base_do_update(desc, data, len,
+- (sha512_block_fn *)sha512_block_data_order_neon);
+- sha512_base_do_finalize(desc,
+- (sha512_block_fn *)sha512_block_data_order_neon);
++ sha512_base_do_update(desc, data, len, sha512_block_data_order_neon);
++ sha512_base_do_finalize(desc, sha512_block_data_order_neon);
+ kernel_neon_end();
+
+ return sha512_base_finish(desc, out);
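
All of the sha*-glue hunks above make the same change: the assembly routines
are declared with the exact sha{1,256,512}_block_fn signature rather than
being cast at the call site, presumably so that type-based indirect-call
checking (RAP, configured elsewhere in this patch) sees matching types. For
reference, the typedef the sha1 declarations now line up with, as in
include/crypto/sha1_base.h:

	typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src,
				     int blocks);

With the prototypes matching, the routine can be passed directly and there is
no cast left to launder a mismatch.
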
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index 66d0e21..8fa3237 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -18,17 +18,41 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
++#endif
++
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
+ /*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+ #define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -38,45 +62,74 @@
+ * to ensure that the update happens.
+ */
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " bvc 3f\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_POST_RETURN \
++ " bvc 3f\n" \
++ " mov %1, %0\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_POST_RETURN
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic_" #op "\n" \
++ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+ " " #asm_op " %0, %0, %4\n" \
++ __OVERFLOW_POST \
+ " strex %1, %0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op)\
++ __ATOMIC_OP(op, , c_op, asm_op##s)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t *v)\
+ { \
+- unsigned long tmp; \
++ int tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+-" " #asm_op " %0, %0, %4\n" \
+-" strex %1, %0, [%3]\n" \
+-" teq %1, #0\n" \
+-" bne 1b" \
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
++" " #asm_op " %1, %0, %4\n" \
++ __OVERFLOW_POST_RETURN \
++" strex %0, %1, [%3]\n" \
++" teq %0, #0\n" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ \
+ return result; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)\
++ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+ { \
+@@ -99,6 +152,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+ }
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+ #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+ #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+@@ -141,12 +195,17 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+ "1: ldrex %0, [%4]\n"
+ " teq %0, %5\n"
+-" beq 2f\n"
+-" add %1, %0, %6\n"
++" beq 4f\n"
++" adds %1, %0, %6\n"
++
++ __OVERFLOW_POST
++
+ " strex %2, %1, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++
++ __OVERFLOW_EXTABLE
++
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -157,14 +216,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+ #error SMP not supported on pre-ARMv6 CPUs
+ #endif
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long flags; \
+ \
+@@ -173,8 +254,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ raw_local_irq_restore(flags); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
++ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
+ { \
+ unsigned long flags; \
+ int val; \
+@@ -201,6 +285,9 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
+ return val; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
++ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
++
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+ int ret;
+@@ -215,6 +302,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg((atomic_t *)v, old, new);
++}
++
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+@@ -250,16 +342,29 @@ ATOMIC_OPS(xor, ^=, eor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++#define atomic_xchg_unchecked(v, new) (xchg_unchecked(&((v)->counter), new))
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, v) == 0)
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return_relaxed(v) (atomic_add_return_relaxed(1, v))
++#define atomic_inc_return_unchecked_relaxed(v) (atomic_add_return_unchecked_relaxed(1, v))
+ #define atomic_dec_return_relaxed(v) (atomic_sub_return_relaxed(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -270,6 +375,14 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -286,6 +399,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ __asm__ __volatile__("@ atomic64_set\n"
+@@ -294,6 +420,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++" strd %2, %H2, [%1]"
++ : "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ );
++}
+ #else
+ static inline long long atomic64_read(const atomic64_t *v)
+ {
+@@ -308,6 +443,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ long long tmp;
+@@ -322,50 +470,82 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ long long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
+ #endif
+
+-#define ATOMIC64_OP(op, op1, op2) \
+-static inline void atomic64_##op(long long i, atomic64_t *v) \
++#define __OVERFLOW_POST_RETURN64 \
++ " bvc 3f\n" \
++" mov %Q1, %Q0\n" \
++" mov %R1, %R0\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++
++#define __ATOMIC64_OP(op, suffix, op1, op2) \
++static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
+ { \
+ long long result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic64_" #op "\n" \
++ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+ " " #op1 " %Q0, %Q0, %Q4\n" \
+ " " #op2 " %R0, %R0, %R4\n" \
++ __OVERFLOW_POST \
+ " strexd %1, %0, %H0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC64_OP_RETURN(op, op1, op2) \
++#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2) \
++ __ATOMIC64_OP(op, , op1, op2##s)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2) \
+ static inline long long \
+-atomic64_##op##_return_relaxed(long long i, atomic64_t *v) \
++atomic64_##op##_return##suffix##_relaxed(long long i, atomic64##suffix##_t *v) \
+ { \
+ long long result; \
+- unsigned long tmp; \
++ long long tmp; \
+ \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+-" " #op1 " %Q0, %Q0, %Q4\n" \
+-" " #op2 " %R0, %R0, %R4\n" \
+-" strexd %1, %0, %H0, [%3]\n" \
+-" teq %1, #0\n" \
+-" bne 1b" \
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
++" " #op1 " %Q1, %Q0, %Q4\n" \
++" " #op2 " %R1, %R0, %R4\n" \
++ __OVERFLOW_POST_RETURN64 \
++" strexd %0, %1, %H1, [%3]\n" \
++" teq %0, #0\n" \
++" bne 1b\n" \
++ __OVERFLOW_EXTABLE \
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ \
+ return result; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2) \
++ __ATOMIC64_OP_RETURN(op, , op1, op2##s)
++
+ #define ATOMIC64_FETCH_OP(op, op1, op2) \
+ static inline long long \
+ atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v) \
+@@ -398,6 +578,7 @@ ATOMIC64_OPS(add, adds, adc)
+ ATOMIC64_OPS(sub, subs, sbc)
+
+ #define atomic64_add_return_relaxed atomic64_add_return_relaxed
++#define atomic64_add_return_unchecked_relaxed atomic64_add_return_unchecked_relaxed
+ #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+ #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+ #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+@@ -422,7 +603,10 @@ ATOMIC64_OPS(xor, eor, eor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_POST_RETURN
+
+ static inline long long
+ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+@@ -448,6 +632,13 @@ atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
+ }
+ #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+
++static inline long long
++atomic64_cmpxchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long old, long long new)
++{
++ return atomic64_cmpxchg_relaxed((atomic64_t *)ptr, old, new);
++}
++#define atomic64_cmpxchg_unchecked_relaxed atomic64_cmpxchg_unchecked_relaxed
++
+ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ {
+ long long result;
+@@ -468,25 +659,36 @@ static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
+ }
+ #define atomic64_xchg_relaxed atomic64_xchg_relaxed
+
++static inline long long atomic64_xchg_unchecked_relaxed(atomic64_unchecked_t *ptr, long long new)
++{
++ return atomic64_xchg_relaxed((atomic64_t *)ptr, new);
++}
++#define atomic64_xchg_unchecked_relaxed atomic64_xchg_unchecked_relaxed
++
+ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+ long long result;
+- unsigned long tmp;
++ u64 tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+ "1: ldrexd %0, %H0, [%3]\n"
+-" subs %Q0, %Q0, #1\n"
+-" sbc %R0, %R0, #0\n"
+-" teq %R0, #0\n"
+-" bmi 2f\n"
+-" strexd %1, %0, %H0, [%3]\n"
+-" teq %1, #0\n"
++" subs %Q1, %Q0, #1\n"
++" sbcs %R1, %R0, #0\n"
++
++ __OVERFLOW_POST_RETURN64
++
++" teq %R1, #0\n"
++" bmi 4f\n"
++" strexd %0, %1, %H1, [%3]\n"
++" teq %0, #0\n"
+ " bne 1b\n"
+-"2:"
+- : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
++
++ __OVERFLOW_EXTABLE
++
++ : "=&r" (tmp), "=&r" (result), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+
+@@ -509,13 +711,18 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %Q0, %Q0, %Q6\n"
+-" adc %R0, %R0, %R6\n"
++" adcs %R0, %R0, %R6\n"
++
++ __OVERFLOW_POST
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++
++ __OVERFLOW_EXTABLE
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -526,12 +733,19 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ return ret;
+ }
+
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST_RETURN64
++#undef __OVERFLOW_POST
++
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1LL, (v))
++#define atomic64_inc_return_unchecked_relaxed(v) atomic64_add_return_unchecked_relaxed(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
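
Flattened out of the macros, the recurring PAX_REFCOUNT pattern in this file
reads roughly as follows on ARMv6+ (a sketch with the operands of
__ATOMIC_OP(add, ...) filled in):

	1:	ldrex	r0, [r3]	@ exclusive load of v->counter
		adds	r0, r0, r4	@ flag-setting add: V set on signed overflow
		bvc	3f		@ no overflow, carry on
	2:	bkpt	0xf103		@ REFCOUNT_TRAP_INSN, reached on overflow
	3:	strex	r1, r0, [r3]	@ try to publish the result
		teq	r1, #0
		bne	1b		@ exclusivity lost, retry
	4:				@ _ASM_EXTABLE(2b, 4b) resumes here

The __ex_table entry is what lets the PaX fault handler tell this bkpt apart
from an ordinary breakpoint and treat it as a refcount overflow.
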
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..2255c86 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index bdd283b..e66fb83 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const __no_randomize_layout;
+
+ /*
+ * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 524692f..a8871ec 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++ __wsum ret;
++ pax_open_userland();
++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++ pax_close_userland();
++ return ret;
++}
++
++
+
+ /*
+ * Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index 97882f9..ff9d6ac 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -117,6 +117,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+ })
++#define xchg_unchecked_relaxed(ptr, x) ({ \
++ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
++ sizeof(*(ptr))); \
++})
+
+ #include <asm-generic/cmpxchg-local.h>
+
+@@ -128,6 +132,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ #endif
+
+ #define xchg xchg_relaxed
++#define xchg_unchecked xchg_unchecked_relaxed
+
+ /*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
+index baefe1d..29cb35a 100644
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -32,7 +32,7 @@ struct device_node;
+ struct cpuidle_ops {
+ int (*suspend)(unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+-};
++} __no_const;
+
+ struct of_cpuidle_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 99d9f63..ec44cb5 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -42,7 +42,6 @@
+ #define DOMAIN_USER 1
+ #define DOMAIN_IO 0
+ #endif
+-#define DOMAIN_VECTORS 3
+
+ /*
+ * Domain types
+@@ -51,9 +50,28 @@
+ #define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
+ #define DOMAIN_MANAGER 3
++#define DOMAIN_VECTORS 3
++#define DOMAIN_USERCLIENT DOMAIN_CLIENT
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER 1
++#define DOMAIN_KERNEXEC 3
++#else
++#define DOMAIN_MANAGER 1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT 0
++#define DOMAIN_UDEREF 1
++#define DOMAIN_VECTORS DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_VECTORS DOMAIN_USER
++#endif
++
+ #endif
++#define DOMAIN_KERNELCLIENT 1
+
+ #define domain_mask(dom) ((3) << (2 * (dom)))
+ #define domain_val(dom,type) ((type) << (2 * (dom)))
+@@ -62,13 +80,19 @@
+ #define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ /* DOMAIN_VECTORS is defined to DOMAIN_KERNEL */
++#define DACR_INIT \
++ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
+ #else
+ #define DACR_INIT \
+- (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
++ (domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+ #endif
+
+@@ -124,6 +148,17 @@ static inline void set_domain(unsigned val)
+ set_domain(domain); \
+ } while (0)
+
++#elif defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#define modify_domain(dom,type) \
++ do { \
++ struct thread_info *thread = current_thread_info(); \
++ unsigned int domain = get_domain(); \
++ domain &= ~domain_mask(dom); \
++ domain = domain | domain_val(dom, type); \
++ thread->cpu_domain = domain; \
++ set_domain(domain); \
++ } while (0)
++
+ #else
+ static inline void modify_domain(unsigned dom, unsigned type) { }
+ #endif
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ff..f60b47b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
++ pax_open_kernel(); \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
++ pax_close_kernel(); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
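
The pax_open_kernel()/pax_close_kernel() bracket added here is the general
KERNEXEC idiom for code that must legitimately write through normally
read-only kernel mappings: on non-LPAE ARM (see the pgtable.h hunk later in
this patch) it temporarily switches DOMAIN_KERNEL to the manager type, which
bypasses page permissions, keeping preemption disabled for the duration. A
hedged sketch of the idiom (the write target is purely illustrative):

	pax_open_kernel();			/* RO kernel mappings now writable */
	memcpy(rx_code_buf, template, len);	/* e.g. populating an RX buffer */
	pax_close_kernel();			/* back to read-only, preemption on */
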
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 6795368..6c4d749 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ preempt_disable();
++
+ __ua_flags = uaccess_save_and_enable();
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+ * This is the "bare minimum". AIO seems to require this.
+ */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+ int (*residue)(unsigned int, dma_t *); /* optional */
+ int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+ const char *type;
+-};
++} __do_const;
+
+ struct dma_struct {
+ void *addr; /* single DMA address */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index 9b7c328..2dfe68b 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -23,17 +23,19 @@ struct map_desc {
+
+ /* types 0-3 are defined in asm/io.h */
+ enum {
+- MT_UNCACHED = 4,
+- MT_CACHECLEAN,
+- MT_MINICLEAN,
++ MT_UNCACHED_RW = 4,
++ MT_CACHECLEAN_RO,
++ MT_MINICLEAN_RO,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+- MT_MEMORY_RWX,
++ __MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+- MT_ROM,
+- MT_MEMORY_RWX_NONCACHED,
++ MT_MEMORY_RX,
++ MT_ROM_RX,
++ MT_MEMORY_RW_NONCACHED,
++ MT_MEMORY_RX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+- MT_MEMORY_RWX_ITCM,
++ MT_MEMORY_RX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+ };
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index c2bf24f..69e437c 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -39,7 +39,7 @@ struct outer_cache_fns {
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
+-};
++} __no_const;
+
+ extern struct outer_cache_fns outer_cache;
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 4355f0e..cd9168e 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -23,6 +23,7 @@
+
+ #else
+
++#include <linux/compiler.h>
+ #include <asm/glue.h>
+
+ /*
+@@ -114,7 +115,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index b2902a5..da11e4d 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+
+ #define check_pgt_cache() do { } while (0)
+
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pud_populate(mm,pmd,pte) BUG()
++#define pud_populate_kernel(mm,pmd,pte) BUG()
+
+ #endif /* CONFIG_ARM_LPAE */
+
+@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ __free_page(pte);
+ }
+
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++ if (addr & SECTION_SIZE)
++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++ else
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++ flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+ pmdval_t prot)
+ {
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 3f82e9d..2a85e8b 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -28,7 +28,7 @@
+ /*
+ * - section
+ */
+-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
+ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+@@ -40,6 +40,7 @@
+ #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+ #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+ #define PMD_SECT_AF (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
+
+ #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+@@ -70,6 +71,7 @@
+ * - extended small page/tiny page
+ */
+ #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
+ #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index 92fd2c8..061dae1 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -127,6 +127,9 @@
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN (_AT(pteval_t, 0))
++
+ /*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index 2a029bc..a0524c7 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -80,6 +80,7 @@
+ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
++#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+@@ -90,10 +91,12 @@
+ #define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
+ #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+ #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
++#define PMD_SECT_RDONLY PMD_SECT_AP2
+
+ /*
+ * To be used in assembly code with the upper page attributes.
+ */
++#define L_PTE_PXN_HIGH (1 << (53 - 32))
+ #define L_PTE_XN_HIGH (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index a8d656d..2febb8a 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -33,6 +33,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -48,6 +51,9 @@
+ #define LIBRARY_TEXT_START 0x0c000000
+
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++
++static inline int test_domain(int domain, int domaintype)
++{
++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ preempt_disable();
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++ return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++ /* DOMAIN_MANAGER = "client" under KERNEXEC */
++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++ preempt_enable_no_resched();
++#endif
++ return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+ */
+ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
+
+@@ -308,7 +356,7 @@ static inline pte_t pte_mknexec(pte_t pte)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+- L_PTE_NONE | L_PTE_VALID;
++ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
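/*
 * [editor's sketch] pax_open_kernel()/pax_close_kernel() above only
 * rewrite the two DACR bits owned by DOMAIN_KERNEL. Assuming that
 * domain_val(d, t) expands to t << (2*d), as in asm/domain.h, the bit
 * manipulation is:
 */
#include <stdio.h>

#define DOMAIN_KERNEL	2	/* index assumed for illustration */
#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

#define domain_val(dom, type)	((type) << (2 * (dom)))

static unsigned int set_domain_bits(unsigned int dacr, int dom, int type)
{
	dacr &= ~domain_val(dom, 3);	/* clear the 2-bit field */
	dacr |= domain_val(dom, type);	/* install the new access type */
	return dacr;
}

int main(void)
{
	unsigned int dacr = domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER);

	dacr = set_domain_bits(dacr, DOMAIN_KERNEL, DOMAIN_CLIENT);
	printf("DACR=%#x\n", dacr);	/* kernel domain now "client" */
	return 0;
}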
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index 3d6dc8b..1262ad3 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -108,7 +108,7 @@ struct smp_operations {
+ int (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+
+ struct of_cpu_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
+index cf4f3aa..8f2f2d9 100644
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -7,19 +7,19 @@
+ */
+
+ #define __HAVE_ARCH_STRRCHR
+-extern char * strrchr(const char * s, int c);
++extern char * strrchr(const char * s, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCHR
+-extern char * strchr(const char * s, int c);
++extern char * strchr(const char * s, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMCPY
+-extern void * memcpy(void *, const void *, __kernel_size_t);
++extern void * memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMMOVE
+-extern void * memmove(void *, const void *, __kernel_size_t);
++extern void * memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMCHR
+-extern void * memchr(const void *, int, __kernel_size_t);
++extern void * memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMSET
+ extern void * memset(void *, int, __kernel_size_t);
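/*
 * [editor's sketch; the shim is an assumption] __nocapture() comes
 * from PaX's gcc plugins and marks pointer parameters the callee does
 * not capture; a stock compiler needs it defined away so annotated
 * prototypes like the ones above still parse. my_strrchr is a
 * hypothetical stand-in:
 */
#include <stdio.h>
#include <string.h>

#ifndef __nocapture
#define __nocapture(...)	/* plugin absent: annotation is a no-op */
#endif

extern char *my_strrchr(const char *s, int c) __nocapture(-1);

char *my_strrchr(const char *s, int c)
{
	return strrchr(s, c);
}

int main(void)
{
	printf("%s\n", my_strrchr("a/b/c", '/'));
	return 0;
}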
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 776757d..a552c1d 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -73,6 +73,9 @@ struct thread_info {
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
+ }
+
+ #define init_thread_info (init_thread_union.thread_info)
+@@ -143,6 +146,10 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ * to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID 8
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -158,10 +165,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
+
+ /* Checks for any syscall work in entry-common.S */
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
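/*
 * [editor's sketch] The "flexible second operand" comment above refers
 * to the ARM rule that a data-processing immediate must be an 8-bit
 * value rotated right by an even amount. Keeping TIF_GRSEC_SETXID
 * within 8 bits of the other syscall-work flags keeps the combined
 * _TIF_SYSCALL_WORK mask encodable in a single tst instruction. A
 * small checker for that rule:
 */
#include <stdio.h>
#include <stdint.h>

static int arm_encodable(uint32_t imm)
{
	for (int rot = 0; rot < 32; rot += 2) {
		/* rotate left by rot == rotate right by 32 - rot */
		uint32_t v = rot ? (imm << rot) | (imm >> (32 - rot)) : imm;

		if (v <= 0xffu)
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("bits 4..8 (0x1f0):  %s\n", arm_encodable(0x1f0) ? "ok" : "no");
	printf("bits 0,12 (0x1001): %s\n", arm_encodable(0x1001) ? "ok" : "no");
	return 0;
}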
+diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
+index f6fcc67..5895d62 100644
+--- a/arch/arm/include/asm/timex.h
++++ b/arch/arm/include/asm/timex.h
+@@ -13,6 +13,7 @@
+ #define _ASMARM_TIMEX_H
+
+ typedef unsigned long cycles_t;
++extern int read_current_timer(unsigned long *timer_val);
+ #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; })
+
+ #endif
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 5f833f7..76e6644 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
++ pax_open_kernel();
+ *((unsigned int *)0xffff0ff0) = val;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 1f59ea05..81245f0 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -18,6 +18,7 @@
+ #include <asm/domain.h>
+ #include <asm/unified.h>
+ #include <asm/compiler.h>
++#include <asm/pgtable.h>
+
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -50,6 +51,59 @@ struct exception_table_entry
+ extern int fixup_exception(struct pt_regs *regs);
+
+ /*
++ * These two are intentionally not defined anywhere - if the kernel
++ * code generates any references to them, that's a bug.
++ */
++extern int __get_user_bad(void);
++extern int __put_user_bad(void);
++
++/*
++ * Note that this is actually 0x1,0000,0000
++ */
++#define KERNEL_DS 0x00000000
++#define get_ds() (KERNEL_DS)
++
++#ifdef CONFIG_MMU
++
++#define USER_DS TASK_SIZE
++#define get_fs() (current_thread_info()->addr_limit)
++
++static inline void set_fs(mm_segment_t fs)
++{
++ current_thread_info()->addr_limit = fs;
++ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
++}
++
++#define segment_eq(a, b) ((a) == (b))
++
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++static inline void pax_open_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
++ }
++#endif
++
++}
++
++static inline void pax_close_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
++ }
++#endif
++
++}
++
++/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+@@ -66,6 +120,7 @@ static inline unsigned int uaccess_save_and_enable(void)
+
+ return old_domain;
+ #else
++ pax_open_userland();
+ return 0;
+ #endif
+ }
+@@ -75,35 +130,11 @@ static inline void uaccess_restore(unsigned int flags)
+ #ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
++#else
++ pax_close_userland();
+ #endif
+ }
+
+-/*
+- * These two are intentionally not defined anywhere - if the kernel
+- * code generates any references to them, that's a bug.
+- */
+-extern int __get_user_bad(void);
+-extern int __put_user_bad(void);
+-
+-/*
+- * Note that this is actually 0x1,0000,0000
+- */
+-#define KERNEL_DS 0x00000000
+-#define get_ds() (KERNEL_DS)
+-
+-#ifdef CONFIG_MMU
+-
+-#define USER_DS TASK_SIZE
+-#define get_fs() (current_thread_info()->addr_limit)
+-
+-static inline void set_fs(mm_segment_t fs)
+-{
+- current_thread_info()->addr_limit = fs;
+- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+-}
+-
+-#define segment_eq(a, b) ((a) == (b))
+-
+ /* We use 33-bit arithmetic here... */
+ #define __range_ok(addr, size) ({ \
+ unsigned long flag, roksum; \
+@@ -268,6 +299,7 @@ static inline void set_fs(mm_segment_t fs)
+
+ #endif /* CONFIG_MMU */
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
+
+ #define user_addr_max() \
+@@ -474,10 +506,10 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+-static inline unsigned long __must_check
++static inline unsigned long __must_check __size_overflow(3)
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned int __ua_flags;
+@@ -489,9 +521,9 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ return n;
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(3)
+ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+ static inline unsigned long __must_check
+@@ -511,9 +543,9 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ #endif
+ }
+
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ arm_clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check
++extern unsigned long __must_check __size_overflow(2)
+ __clear_user_std(void __user *addr, unsigned long n);
+
+ static inline unsigned long __must_check
+@@ -534,6 +566,10 @@ __clear_user(void __user *addr, unsigned long n)
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned long res = n;
++
++ if ((long)n < 0)
++ return n;
++
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+@@ -543,6 +579,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
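/*
 * [editor's sketch] The (long)n < 0 guards added above reject sizes
 * with the top bit set before any copying happens; such values usually
 * come from an underflowed size calculation. Standalone illustration:
 */
#include <stdio.h>

static unsigned long checked_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: report everything uncopied */
	/* ... the real routine would copy n bytes here ... */
	return 0;		/* 0 bytes left uncopied */
}

int main(void)
{
	unsigned long len = 0;

	printf("left=%lu\n", checked_copy(len - 1));	/* underflow -> refused */
	printf("left=%lu\n", checked_copy(16));		/* sane -> copied */
	return 0;
}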
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 5af0ed1..cea83883 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -92,7 +92,7 @@
+ * ARMv7 groups of PSR bits
+ */
+ #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
+ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 8e8d20c..fd4b52d 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -59,7 +59,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+
+ /* networking */
+ EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_from_user);
++EXPORT_SYMBOL(__csum_partial_copy_from_user);
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
+ EXPORT_SYMBOL(__csum_ipv6_magic);
+
+diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
+index 9f43ba0..1cee475 100644
+--- a/arch/arm/kernel/efi.c
++++ b/arch/arm/kernel/efi.c
+@@ -60,9 +60,9 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+ * preference.
+ */
+ if (md->attribute & EFI_MEMORY_WB)
+- desc.type = MT_MEMORY_RWX;
++ desc.type = __MT_MEMORY_RWX;
+ else if (md->attribute & EFI_MEMORY_WT)
+- desc.type = MT_MEMORY_RWX_NONCACHED;
++ desc.type = MT_MEMORY_RW_NONCACHED;
+ else if (md->attribute & EFI_MEMORY_WC)
+ desc.type = MT_DEVICE_WC;
+ else
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 9f157e7..8e3f857 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -50,6 +50,87 @@
+ 9997:
+ .endm
+
++ .macro pax_enter_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ make aligned space for saved DACR
++ sub sp, sp, #8
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++ @ store old DACR on stack
++ str r1, [sp, #8]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
++ .endm
++
++ .macro pax_open_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_CLIENT
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_close_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
+ .macro pabt_helper
+ @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+ #ifdef MULTI_PABORT
+@@ -92,11 +173,15 @@
+ * Invalid mode handlers
+ */
+ .macro inv_entry, reason
++
++ pax_enter_kernel
++
+ sub sp, sp, #PT_REGS_SIZE
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
++
+ mov r1, #\reason
+ .endm
+
+@@ -152,6 +237,9 @@ ENDPROC(__und_invalid)
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
+ UNWIND(.fnstart )
+ UNWIND(.save {r0 - pc} )
++
++ pax_enter_kernel
++
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ #ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+@@ -167,7 +255,12 @@ ENDPROC(__und_invalid)
+ ldmia r0, {r3 - r5}
+ add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r6, #-1 @ "" "" "" ""
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ offset sp by 8 as done in pax_enter_kernel
++ add r2, sp, #(SVC_REGS_SIZE + \stack_hole + 4)
++#else
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
++#endif
+ SPFIX( addeq r2, r2, #4 )
+ str r3, [sp, #-4]! @ save the "real" r0 copied
+ @ from the exception stack
+@@ -382,6 +475,9 @@ ENDPROC(__fiq_abt)
+ .macro usr_entry, trace=1, uaccess=1
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind ) @ don't unwind the user space
++
++ pax_enter_kernel_user
++
+ sub sp, sp, #PT_REGS_SIZE
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
+@@ -495,7 +591,9 @@ __und_usr:
+ tst r3, #PSR_T_BIT @ Thumb mode?
+ bne __und_usr_thumb
+ sub r4, r2, #4 @ ARM instr at LR - 4
++ pax_open_userland
+ 1: ldrt r0, [r4]
++ pax_close_userland
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
+ uaccess_disable ip
+@@ -531,11 +629,15 @@ __und_usr_thumb:
+ */
+ .arch armv6t2
+ #endif
++ pax_open_userland
+ 2: ldrht r5, [r4]
++ pax_close_userland
+ ARM_BE8(rev16 r5, r5) @ little endian instruction
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
++ pax_open_userland
+ 3: ldrht r0, [r2]
++ pax_close_userland
+ ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+@@ -566,7 +668,8 @@ ENDPROC(__und_usr)
+ */
+ .pushsection .text.fixup, "ax"
+ .align 2
+-4: str r4, [sp, #S_PC] @ retry current instruction
++4: pax_close_userland
++ str r4, [sp, #S_PC] @ retry current instruction
+ ret r9
+ .popsection
+ .pushsection __ex_table,"a"
+@@ -788,7 +891,7 @@ ENTRY(__switch_to)
+ THUMB( str lr, [ip], #4 )
+ ldr r4, [r2, #TI_TP_VALUE]
+ ldr r5, [r2, #TI_TP_VALUE + 4]
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
+ ldr r6, [r2, #TI_CPU_DOMAIN]
+@@ -799,7 +902,7 @@ ENTRY(__switch_to)
+ ldr r8, =__stack_chk_guard
+ ldr r7, [r7, #TSK_STACK_CANARY]
+ #endif
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+ #endif
+ mov r5, r0
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index 10c3283..c47cdf5 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -11,18 +11,46 @@
+ #include <asm/assembler.h>
+ #include <asm/unistd.h>
+ #include <asm/ftrace.h>
++#include <asm/domain.h>
+ #include <asm/unwind.h>
+
++#include "entry-header.S"
++
+ #ifdef CONFIG_NEED_RET_TO_USER
+ #include <mach/entry-macro.S>
+ #else
+ .macro arch_ret_to_user, tmp1, tmp2
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_UDEREF
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
+ .endm
+ #endif
+
+-#include "entry-header.S"
+-
+-
+ .align 5
+ #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+ /*
+@@ -36,7 +64,9 @@ ret_fast_syscall:
+ UNWIND(.cantunwind )
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #_TIF_SYSCALL_WORK
++ bne fast_work_pending
++ tst r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+
+ /* perform architecture specific actions before user return */
+@@ -62,7 +92,9 @@ ret_fast_syscall:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+ disable_irq_notrace @ disable interrupts
+ ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
+- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
++ tst r1, #_TIF_SYSCALL_WORK
++ bne __sys_trace_return_nosave
++ tst r1, #_TIF_WORK_MASK
+ beq no_work_pending
+ UNWIND(.fnend )
+ ENDPROC(ret_fast_syscall)
+@@ -199,6 +231,12 @@ ENTRY(vector_swi)
+
+ uaccess_disable tbl
+
++ /*
++ * do this here to avoid a performance hit of wrapping the code above
++ * that directly dereferences userland to parse the SWI instruction
++ */
++ pax_enter_kernel_user
++
+ adr tbl, sys_call_table @ load syscall table pointer
+
+ #if defined(CONFIG_OABI_COMPAT)
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 6391728..6bf90b8 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -196,6 +196,59 @@
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
++ .macro pax_enter_kernel_user
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read old DACR from stack into r1
++ ldr r1, [sp, #(8 + S_SP)]
++ sub r1, r1, #8
++ ldr r1, [r1]
++
++ @ write r1 to current_thread_info()->cpu_domain
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
+
+ .macro svc_exit, rpsr, irq = 0
+ .if \irq != 0
+@@ -219,6 +272,8 @@
+ uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
+
++ pax_exit_kernel
++
+ #ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
+ msr spsr_cxsf, \rpsr
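/*
 * [editor's sketch, 8K stacks assumed] The recurring
 * "bic rX, rX, #0x1fc0; bic rX, rX, #0x3f" pairs in these entry macros
 * clear the low 13 bits of the stack pointer in two steps, because an
 * ARM immediate cannot encode 0x1fff in one instruction; the result is
 * the thread_info at the base of the 8K kernel stack. In C terms:
 */
#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL	/* assumed 8K kernel stack */

static uintptr_t thread_info_base(uintptr_t sp)
{
	sp &= ~(uintptr_t)0x1fc0;	/* first bic */
	sp &= ~(uintptr_t)0x3f;		/* second bic */
	return sp;			/* == sp & ~(THREAD_SIZE - 1) */
}

int main(void)
{
	uintptr_t sp = 0xc0a03f84;

	printf("%#lx -> %#lx\n", (unsigned long)sp,
	       (unsigned long)thread_info_base(sp));
	return 0;
}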
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 059c3da..8e45cfc 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
+ void *base = vectors_page;
+ unsigned offset = FIQ_OFFSET;
+
++ pax_open_kernel();
+ memcpy(base + offset, start, length);
++ pax_close_kernel();
++
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset, offset +
+ length);
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index 4f14b5c..91ff261 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -38,17 +38,47 @@
+ #endif
+
+ #ifdef CONFIG_MMU
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ void *p;
++
++ if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
++ return NULL;
++
++ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+ return p;
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
++
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void module_memfree_exec(void *module_region)
++{
++ module_memfree(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
+ #endif
+
+ int
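/*
 * [editor's userspace analogue, not kernel code] Under PAX_KERNEXEC
 * the hunk above gives modules two allocators: module_alloc() returns
 * non-executable PAGE_KERNEL memory and module_alloc_exec() returns
 * PAGE_KERNEL_EXEC memory, so nothing is writable and executable at
 * the same time. The same W^X discipline with mmap/mprotect:
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	/* stage the code in a writable, non-executable mapping */
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memset(p, 0x90, len);		/* placeholder bytes */

	/* flip to read+execute before anything may run it */
	if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0)
		return 1;
	puts("mapping is RX now, never RWX");
	return 0;
}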
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 69bda1a..755113a 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ else
+ __acquire(&patch_lock);
+
++ pax_open_kernel();
+ if (thumb2 && __opcode_is_thumb16(insn)) {
+ *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
+ size = sizeof(u16);
+@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ *(u32 *)waddr = insn;
+ size = sizeof(u32);
+ }
++ pax_close_kernel();
+
+ if (waddr != addr) {
+ flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 91d2d5b..042c26e 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -118,8 +118,8 @@ void __show_regs(struct pt_regs *regs)
+
+ show_regs_print_info(KERN_DEFAULT);
+
+- print_symbol("PC is at %s\n", instruction_pointer(regs));
+- print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++ printk("LR is at %pA\n", (void *)regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -233,7 +233,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
+
+ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ /*
+ * Copy the initial value of the domain access control register
+ * from the current thread: thread->addr_limit will have been
+@@ -336,7 +336,7 @@ static struct vm_area_struct gate_vma = {
+
+ static int __init gate_vma_init(void)
+ {
+- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ return 0;
+ }
+ arch_initcall(gate_vma_init);
+@@ -365,92 +365,14 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+ return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+
+-/* If possible, provide a placement hint at a random offset from the
+- * stack for the sigpage and vdso pages.
+- */
+-static unsigned long sigpage_addr(const struct mm_struct *mm,
+- unsigned int npages)
+-{
+- unsigned long offset;
+- unsigned long first;
+- unsigned long last;
+- unsigned long addr;
+- unsigned int slots;
+-
+- first = PAGE_ALIGN(mm->start_stack);
+-
+- last = TASK_SIZE - (npages << PAGE_SHIFT);
+-
+- /* No room after stack? */
+- if (first > last)
+- return 0;
+-
+- /* Just enough room? */
+- if (first == last)
+- return first;
+-
+- slots = ((last - first) >> PAGE_SHIFT) + 1;
+-
+- offset = get_random_int() % slots;
+-
+- addr = first + (offset << PAGE_SHIFT);
+-
+- return addr;
+-}
+-
+-static struct page *signal_page;
+-extern struct page *get_signal_page(void);
+-
+-static const struct vm_special_mapping sigpage_mapping = {
+- .name = "[sigpage]",
+- .pages = &signal_page,
+-};
+-
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma;
+- unsigned long npages;
+- unsigned long addr;
+- unsigned long hint;
+- int ret = 0;
+-
+- if (!signal_page)
+- signal_page = get_signal_page();
+- if (!signal_page)
+- return -ENOMEM;
+-
+- npages = 1; /* for sigpage */
+- npages += vdso_total_pages;
+
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+- hint = sigpage_addr(mm, npages);
+- addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
+- if (IS_ERR_VALUE(addr)) {
+- ret = addr;
+- goto up_fail;
+- }
+-
+- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+- &sigpage_mapping);
+-
+- if (IS_ERR(vma)) {
+- ret = PTR_ERR(vma);
+- goto up_fail;
+- }
+-
+- mm->context.sigpage = addr;
+-
+- /* Unlike the sigpage, failure to install the vdso is unlikely
+- * to be fatal to the process, so no error check needed
+- * here.
+- */
+- arm_install_vdso(mm, addr + PAGE_SIZE);
+-
+- up_fail:
++ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
+ up_write(&mm->mmap_sem);
+- return ret;
++ return 0;
+ }
+ #endif
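/*
 * [editor's sketch; the constants mirror the hunk, the policy choice
 * is grsecurity's] The replacement arch_setup_additional_pages() above
 * no longer installs a per-process [sigpage] mapping; it only records
 * a randomized, 4-byte-aligned value. The arithmetic is the usual
 * "random offset within a range, then align down":
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BASE	0xc0000000u	/* stand-in for PAGE_OFFSET */
#define RANGE	0x3ffeffe0u

int main(void)
{
	srand((unsigned)time(NULL));

	unsigned int addr = (BASE + (unsigned)rand() % RANGE) & 0xfffffffcu;

	printf("sigpage = %#x (4-byte aligned: %s)\n",
	       addr, (addr & 3) == 0 ? "yes" : "no");
	return 0;
}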
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ae738a6..ee4d46f 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
+ regs->ARM_ip = ip;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+ {
+ current_thread_info()->syscall = scno;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
+index 3fa867a..d610607 100644
+--- a/arch/arm/kernel/reboot.c
++++ b/arch/arm/kernel/reboot.c
+@@ -120,6 +120,7 @@ void machine_power_off(void)
+
+ if (pm_power_off)
+ pm_power_off();
++ while (1);
+ }
+
+ /*
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 34e3f3c..3d2dada 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -112,6 +112,8 @@ EXPORT_SYMBOL(elf_hwcap);
+ unsigned int elf_hwcap2 __read_mostly;
+ EXPORT_SYMBOL(elf_hwcap2);
+
++pteval_t __supported_pte_mask __read_only;
++pmdval_t __supported_pmd_mask __read_only;
+
+ #ifdef MULTI_CPU
+ struct processor processor __ro_after_init;
+@@ -257,9 +259,13 @@ static int __get_cpu_architecture(void)
+ * Register 0 and check for VMSAv7 or PMSAv7 */
+ unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
+ if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+- (mmfr0 & 0x000000f0) >= 0x00000030)
++ (mmfr0 & 0x000000f0) >= 0x00000030) {
+ cpu_arch = CPU_ARCH_ARMv7;
+- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
++ __supported_pte_mask |= L_PTE_PXN;
++ __supported_pmd_mask |= PMD_PXNTABLE;
++ }
++ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+ (mmfr0 & 0x000000f0) == 0x00000020)
+ cpu_arch = CPU_ARCH_ARMv6;
+ else
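/*
 * [editor's sketch] The setup.c hunk above reads ID_MMFR0 and turns on
 * the PXN masks only when the VMSA field (bits [3:0]) is 4 (VMSAv7
 * with PXN) or 5 (VMSAv7 with PXN and long descriptors). A tiny
 * decoder of that field:
 */
#include <stdio.h>
#include <stdint.h>

static int vmsa_has_pxn(uint32_t mmfr0)
{
	uint32_t vmsa = mmfr0 & 0xf;

	return vmsa == 4 || vmsa == 5;
}

int main(void)
{
	printf("vmsa=3 -> pxn=%d\n", vmsa_has_pxn(0x00000003));
	printf("vmsa=5 -> pxn=%d\n", vmsa_has_pxn(0x00100005));
	return 0;
}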
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 7b8f214..ece8e28 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -24,8 +24,6 @@
+
+ extern const unsigned long sigreturn_codes[7];
+
+-static unsigned long signal_return_offset;
+-
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -388,8 +386,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ * except when the MPU has protected the vectors
+ * page from PL0
+ */
+- retcode = mm->context.sigpage + signal_return_offset +
+- (idx << 2) + thumb;
++ retcode = mm->context.sigpage + (idx << 2) + thumb;
+ } else
+ #endif
+ {
+@@ -601,33 +598,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
+ }
+-
+-struct page *get_signal_page(void)
+-{
+- unsigned long ptr;
+- unsigned offset;
+- struct page *page;
+- void *addr;
+-
+- page = alloc_pages(GFP_KERNEL, 0);
+-
+- if (!page)
+- return NULL;
+-
+- addr = page_address(page);
+-
+- /* Give the signal return code some randomness */
+- offset = 0x200 + (get_random_int() & 0x7fc);
+- signal_return_offset = offset;
+-
+- /*
+- * Copy signal return handlers into the vector page, and
+- * set sigreturn to be a pointer to these.
+- */
+- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+-
+- ptr = (unsigned long)addr + offset;
+- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+-
+- return page;
+-}
+diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
+index b10e136..cb5edf9 100644
+--- a/arch/arm/kernel/tcm.c
++++ b/arch/arm/kernel/tcm.c
+@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
+ .virtual = ITCM_OFFSET,
+ .pfn = __phys_to_pfn(ITCM_OFFSET),
+ .length = 0,
+- .type = MT_MEMORY_RWX_ITCM,
++ .type = MT_MEMORY_RX_ITCM,
+ }
+ };
+
+@@ -362,7 +362,9 @@ void __init tcm_init(void)
+ start = &__sitcm_text;
+ end = &__eitcm_text;
+ ram = &__itcm_start;
++ pax_open_kernel();
+ memcpy(start, ram, itcm_code_sz);
++ pax_close_kernel();
+ pr_debug("CPU ITCM: copied code from %p - %p\n",
+ start, end);
+ itcm_present = true;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 9688ec0..dd072c0 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -287,6 +287,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
+
++extern void gr_handle_kernel_exploit(void);
++
+ static unsigned long oops_begin(void)
+ {
+ int cpu;
+@@ -329,6 +331,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (signr)
+ do_exit(signr);
+ }
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index f7f55df..49c9f9e 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -44,7 +44,8 @@
+ #endif
+
+ #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+- defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
++ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL) || \
++ defined(CONFIG_PAX_REFCOUNT)
+ #define ARM_EXIT_KEEP(x) x
+ #define ARM_EXIT_DISCARD(x)
+ #else
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 19b5f5c..9aa8e58 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -59,7 +59,7 @@ static unsigned long hyp_default_vectors;
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
+ /* The VMID used in the VTTBR */
+-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static unsigned int kvm_vmid_bits __read_mostly;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+@@ -423,7 +423,7 @@ void force_vm_exit(const cpumask_t *mask)
+ */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
+ }
+
+ /**
+@@ -456,7 +456,7 @@ static void update_vttbr(struct kvm *kvm)
+
+ /* First user of a new VMID generation? */
+ if (unlikely(kvm_next_vmid == 0)) {
+- atomic64_inc(&kvm_vmid_gen);
++ atomic64_inc_unchecked(&kvm_vmid_gen);
+ kvm_next_vmid = 1;
+
+ /*
+@@ -473,7 +473,7 @@ static void update_vttbr(struct kvm *kvm)
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
++ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+ kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
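/*
 * [editor's conceptual sketch; atomic64_unchecked_t is PaX's own type]
 * PAX_REFCOUNT traps kernel-wide on signed atomic overflow, so
 * counters like the VMID generation above, which are expected to wrap,
 * are moved to the *_unchecked variants. The distinction, modelled
 * with a GCC overflow builtin:
 */
#include <stdio.h>
#include <stdint.h>

static int64_t checked_inc(int64_t v)		/* protected counter */
{
	int64_t r;

	if (__builtin_add_overflow(v, 1, &r)) {
		fprintf(stderr, "refcount overflow detected\n");
		return v;			/* saturate, do not wrap */
	}
	return r;
}

static uint64_t unchecked_inc(uint64_t v)	/* wrapping is intended */
{
	return v + 1;
}

int main(void)
{
	printf("checked:   %lld\n", (long long)checked_inc(INT64_MAX));
	printf("unchecked: %llu\n", (unsigned long long)unchecked_inc(UINT64_MAX));
	return 0;
}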
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
+index 1712f13..a3165dc 100644
+--- a/arch/arm/lib/csumpartialcopyuser.S
++++ b/arch/arm/lib/csumpartialcopyuser.S
+@@ -71,8 +71,8 @@
+ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ */
+
+-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
+-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
+
+ #include "csumpartialcopygeneric.S"
+
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 6bd1089..e999400 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -84,7 +84,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+ return 1;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(3)
+ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ {
+ unsigned long ua_flags;
+@@ -157,7 +157,7 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
+ return n;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(2)
+ __clear_user_memset(void __user *addr, unsigned long n)
+ {
+ unsigned long ua_flags;
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index 06332f6..1fa0c71 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -724,8 +724,10 @@ void __init exynos_pm_init(void)
+ tmp |= pm_data->wake_disable_mask;
+ pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
+
+- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
+- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
++ pax_open_kernel();
++ const_cast(exynos_pm_syscore_ops.suspend) = pm_data->pm_suspend;
++ const_cast(exynos_pm_syscore_ops.resume) = pm_data->pm_resume;
++ pax_close_kernel();
+
+ register_syscore_ops(&exynos_pm_syscore_ops);
+ suspend_set_ops(&exynos_suspend_ops);
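/*
 * [editor's sketch; const_cast() is modelled here with a plain pointer
 * cast, the kernel macro differs] The const_cast() hunks in these
 * mach-* files all follow one recipe: the ops structure became const
 * under constify, so the rare legitimate runtime assignment casts the
 * qualifier away between pax_open_kernel()/pax_close_kernel(). The
 * cast itself, in standalone C:
 */
#include <stdio.h>

struct pm_ops {
	int (*suspend)(void);
};

static struct pm_ops real_ops;			/* writable storage */
static const struct pm_ops *ops = &real_ops;	/* const view handed out */

static int my_suspend(void)
{
	return 0;
}

int main(void)
{
	/* analogous to: const_cast(ops->suspend) = my_suspend; */
	((struct pm_ops *)ops)->suspend = my_suspend;
	printf("suspend() -> %d\n", ops->suspend());
	return 0;
}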
+diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
+index afba546..9e5403d 100644
+--- a/arch/arm/mach-mmp/mmp2.c
++++ b/arch/arm/mach-mmp/mmp2.c
+@@ -98,7 +98,9 @@ void __init mmp2_init_irq(void)
+ {
+ mmp2_init_icu();
+ #ifdef CONFIG_PM
+- icu_irq_chip.irq_set_wake = mmp2_set_wake;
++ pax_open_kernel();
++ const_cast(icu_irq_chip.irq_set_wake) = mmp2_set_wake;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
+index 1ccbba9..7a95c29 100644
+--- a/arch/arm/mach-mmp/pxa910.c
++++ b/arch/arm/mach-mmp/pxa910.c
+@@ -84,7 +84,9 @@ void __init pxa910_init_irq(void)
+ {
+ icu_init_irq();
+ #ifdef CONFIG_PM
+- icu_irq_chip.irq_set_wake = pxa910_set_wake;
++ pax_open_kernel();
++ const_cast(icu_irq_chip.irq_set_wake) = pxa910_set_wake;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index ae2a018..297ad08 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -156,7 +156,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
+
+ /*
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+- * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
+ */
+@@ -164,7 +164,7 @@ static void __iomem *
+ armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+ {
+- mtype = MT_UNCACHED;
++ mtype = MT_UNCACHED_RW;
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+
+@@ -174,7 +174,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+
+ coherency_cpu_base = of_iomap(np, 0);
+ arch_ioremap_caller = armada_wa_ioremap_caller;
+- pci_ioremap_set_mem_type(MT_UNCACHED);
++ pci_ioremap_set_mem_type(MT_UNCACHED_RW);
+
+ /*
+ * We should switch the PL310 to I/O coherency mode only if
+diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
+index f39bd51..866c780 100644
+--- a/arch/arm/mach-mvebu/pmsu.c
++++ b/arch/arm/mach-mvebu/pmsu.c
+@@ -93,7 +93,7 @@
+ #define ARMADA_370_CRYPT0_ENG_ATTR 0x1
+
+ extern void ll_disable_coherency(void);
+-extern void ll_enable_coherency(void);
++extern int ll_enable_coherency(void);
+
+ extern void armada_370_xp_cpu_resume(void);
+ extern void armada_38x_cpu_resume(void);
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index 6b6fda6..232df1e 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+
+-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+ .late_init = n8x0_menelaus_late_init,
+ };
+
+diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+index 7d62ad4..97774b1 100644
+--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+@@ -89,7 +89,7 @@ struct cpu_pm_ops {
+ void (*resume)(void);
+ void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
+ void (*hotplug_restart)(void);
+-};
++} __no_const;
+
+ static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+ static struct powerdomain *mpuss_pd;
+@@ -107,7 +107,7 @@ static void dummy_cpu_resume(void)
+ static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
+ {}
+
+-static struct cpu_pm_ops omap_pm_ops = {
++static struct cpu_pm_ops omap_pm_ops __read_only = {
+ .finish_suspend = default_finish_suspend,
+ .resume = dummy_cpu_resume,
+ .scu_prepare = dummy_scu_prepare,
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index b4de3da..e027393 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/device.h>
+ #include <linux/smp.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+
+ #include <asm/smp_scu.h>
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index e920dd8..ef999171 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -530,7 +530,7 @@ void omap_device_delete(struct omap_device *od)
+ struct platform_device __init *omap_device_build(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod *oh,
+- void *pdata, int pdata_len)
++ const void *pdata, int pdata_len)
+ {
+ struct omap_hwmod *ohs[] = { oh };
+
+@@ -558,7 +558,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+ struct platform_device __init *omap_device_build_ss(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod **ohs,
+- int oh_cnt, void *pdata,
++ int oh_cnt, const void *pdata,
+ int pdata_len)
+ {
+ int ret = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
+index 78c02b3..c94109a 100644
+--- a/arch/arm/mach-omap2/omap_device.h
++++ b/arch/arm/mach-omap2/omap_device.h
+@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
+ /* Core code interface */
+
+ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
+- struct omap_hwmod *oh, void *pdata,
++ struct omap_hwmod *oh, const void *pdata,
+ int pdata_len);
+
+ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+ struct omap_hwmod **oh, int oh_cnt,
+- void *pdata, int pdata_len);
++ const void *pdata, int pdata_len);
+
+ struct omap_device *omap_device_alloc(struct platform_device *pdev,
+ struct omap_hwmod **ohs, int oh_cnt);
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 1052b29..54669b0 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -206,10 +206,10 @@ struct omap_hwmod_soc_ops {
+ void (*update_context_lost)(struct omap_hwmod *oh);
+ int (*get_context_lost)(struct omap_hwmod *oh);
+ int (*disable_direct_prcm)(struct omap_hwmod *oh);
+-};
++} __no_const;
+
+ /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+-static struct omap_hwmod_soc_ops soc_ops;
++static struct omap_hwmod_soc_ops soc_ops __read_only;
+
+ /* omap_hwmod_list contains all registered struct omap_hwmods */
+ static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
+index 95fee54..b5dd79d 100644
+--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
++++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
+@@ -10,6 +10,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <asm/pgtable.h>
+
+ #include "powerdomain.h"
+
+@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
+
+ void __init am43xx_powerdomains_init(void)
+ {
+- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
++ pax_open_kernel();
++ const_cast(omap4_pwrdm_operations.pwrdm_has_voltdm) = am43xx_check_vcvp;
++ pax_close_kernel();
+ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+ pwrdm_register_pwrdms(powerdomains_am43xx);
+ pwrdm_complete_init();
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index ff0a68c..b312aa0 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+ struct omap_hwmod *oh;
+ char *oh_name = "wd_timer2";
+ char *dev_name = "omap_wdt";
+- struct omap_wd_timer_platform_data pdata;
++ static struct omap_wd_timer_platform_data pdata = {
++ .read_reset_sources = prm_read_reset_sources
++ };
+
+ if (!cpu_class_is_omap2() || of_have_populated_dt())
+ return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+ return -EINVAL;
+ }
+
+- pdata.read_reset_sources = prm_read_reset_sources;
+-
+ pdev = omap_device_build(dev_name, id, oh, &pdata,
+ sizeof(struct omap_wd_timer_platform_data));
+ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+index 92ec8c3..3b09472 100644
+--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
++++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
+@@ -240,7 +240,7 @@ static struct platform_device smdk6410_b_pwr_5v = {
+ };
+ #endif
+
+-static struct s3c_ide_platdata smdk6410_ide_pdata __initdata = {
++static const struct s3c_ide_platdata smdk6410_ide_pdata __initconst = {
+ .setup_gpio = s3c64xx_ide_setup_gpio,
+ };
+
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index 0c6bb45..0f18d70 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -22,6 +22,7 @@
+ #include <asm/proc-fns.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+ #include "platsmp-apmu.h"
+ #include "rcar-gen2.h"
+@@ -316,6 +317,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+
+ void __init shmobile_smp_apmu_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_open_kernel();
++ const_cast(shmobile_suspend_ops.enter) = shmobile_smp_apmu_enter_suspend;
++ pax_close_kernel();
+ }
+ #endif
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index afcee04..63e52ac 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -178,7 +178,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+ bool entered_lp2 = false;
+
+ if (tegra_pending_sgi())
+- ACCESS_ONCE(abort_flag) = true;
++ ACCESS_ONCE_RW(abort_flag) = true;
+
+ cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+
+diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
+index a69b22d..8523a03 100644
+--- a/arch/arm/mach-tegra/irq.c
++++ b/arch/arm/mach-tegra/irq.c
+@@ -20,6 +20,7 @@
+ #include <linux/cpu_pm.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index a970e7f..6f2bf9a 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index 7cd9865..a00b6ab 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -24,6 +24,7 @@
+ #include <linux/io.h>
+ #include <asm/cacheflush.h>
+ #include <asm/smp_scu.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include "common.h"
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index c1799dd..9111dcc 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -446,6 +446,7 @@ config CPU_32v5
+
+ config CPU_32v6
+ bool
++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+
+ config CPU_32v6K
+@@ -603,6 +604,7 @@ config CPU_CP15_MPU
+
+ config CPU_USE_DOMAINS
+ bool
++ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+@@ -813,7 +815,7 @@ config NEED_KUSER_HELPERS
+
+ config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+- depends on MMU
++ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
+ default y
+ help
+ Warning: disabling this option may break user programs.
+@@ -827,7 +829,7 @@ config KUSER_HELPERS
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+- by ROP (return orientated programming) authors when creating
++ by ROP (Return Oriented Programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+@@ -842,7 +844,7 @@ config KUSER_HELPERS
+
+ config VDSO
+ bool "Enable VDSO for acceleration of some system calls"
+- depends on AEABI && MMU && CPU_V7
++ depends on AEABI && MMU && CPU_V7 && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ default y if ARM_ARCH_TIMER
+ select GENERIC_TIME_VSYSCALL
+ help
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 7d5f4c7..c6a0816 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -778,6 +778,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
++ bool is_user_mode = user_mode(regs);
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+@@ -786,14 +787,24 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+
+ if (thumb_mode(regs)) {
+ u16 *ptr = (u16 *)(instrptr & ~1);
+- fault = probe_kernel_address(ptr, tinstr);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address(ptr, tinstr);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address(ptr, tinstr);
+ tinstr = __mem_to_opcode_thumb16(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+- fault = probe_kernel_address(ptr + 1, tinst2);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address(ptr + 1, tinst2);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address(ptr + 1, tinst2);
+ tinst2 = __mem_to_opcode_thumb16(tinst2);
+ instr = __opcode_thumb32_compose(tinstr, tinst2);
+ thumb2_32b = 1;
+@@ -803,7 +814,12 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ }
+ } else {
+- fault = probe_kernel_address((void *)instrptr, instr);
++ if (is_user_mode) {
++ pax_open_userland();
++ fault = probe_kernel_address((void *)instrptr, instr);
++ pax_close_userland();
++ } else
++ fault = probe_kernel_address((void *)instrptr, instr);
+ instr = __mem_to_opcode_arm(instr);
+ }
+
+@@ -812,7 +828,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ goto bad_or_fault;
+ }
+
+- if (user_mode(regs))
++ if (is_user_mode)
+ goto user;
+
+ ai_sys += 1;
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index d1870c7..36d500f 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -44,7 +44,7 @@ struct l2c_init_data {
+ void (*configure)(void __iomem *);
+ void (*unlock)(void __iomem *, unsigned);
+ struct outer_cache_fns outer_cache;
+-};
++} __do_const;
+
+ #define CACHE_LINE_SIZE 32
+
+diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
+index c8c8b9e..c55cc79 100644
+--- a/arch/arm/mm/context.c
++++ b/arch/arm/mm/context.c
+@@ -43,7 +43,7 @@
+ #define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+ static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+ static DEFINE_PER_CPU(atomic64_t, active_asids);
+@@ -193,7 +193,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ {
+ static u32 cur_idx = 1;
+ u64 asid = atomic64_read(&mm->context.id);
+- u64 generation = atomic64_read(&asid_generation);
++ u64 generation = atomic64_read_unchecked(&asid_generation);
+
+ if (asid != 0) {
+ u64 newasid = generation | (asid & ~ASID_MASK);
+@@ -225,7 +225,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+ if (asid == NUM_USER_ASIDS) {
+- generation = atomic64_add_return(ASID_FIRST_VERSION,
++ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+@@ -254,14 +254,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+ cpu_set_reserved_ttbr0();
+
+ asid = atomic64_read(&mm->context.id);
+- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
++ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+ goto switch_mm_fastpath;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(&mm->context.id);
+- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
++ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
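atomic64_unchecked_t marks counters that are allowed to wrap, here the ASID generation counter, which rolls over by design, so PAX_REFCOUNT overflow detection skips them. Where the feature is absent the _unchecked names simply alias the plain API; a sketch of that fallback, mirroring the macro aliases this patch adds for frv, ia64 and arm64 further below (the typedef is an assumption):

#ifndef CONFIG_PAX_REFCOUNT
typedef atomic64_t atomic64_unchecked_t;
#define atomic64_read_unchecked(v)		atomic64_read(v)
#define atomic64_add_return_unchecked(i, v)	atomic64_add_return((i), (v))
#endif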
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 0122ad1..1aae1cb 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -25,6 +25,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/system_info.h>
+ #include <asm/tlbflush.h>
++#include <asm/sections.h>
+
+ #include "fault.h"
+
+@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ if (fixup_exception(regs))
+ return;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ }
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((fsr & FSR_WRITE) &&
++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
++ (MODULES_VADDR <= addr && addr < MODULES_END)))
++ {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ }
++#endif
++
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((tsk->mm->pax_flags & MF_PAX_PAGEEXEC) && (fsr & FSR_LNX_PF)) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ goto die;
++ }
++#endif
++
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+ show_pte(current->mm, addr);
+@@ -574,15 +647,118 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+ ifsr_info[nr].name = name;
+ }
+
++asmlinkage int sys_sigreturn(struct pt_regs *regs);
++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
++
+ asmlinkage void __exception
+ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ {
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
++ unsigned long pc = instruction_pointer(regs);
++
++ if (user_mode(regs)) {
++ unsigned long sigpage = current->mm->context.sigpage;
++
++ if (sigpage <= pc && pc < sigpage + 7*4) {
++ if (pc < sigpage + 3*4)
++ sys_sigreturn(regs);
++ else
++ sys_rt_sigreturn(regs);
++ return;
++ }
++ if (pc == 0xffff0f60UL) {
++ /*
++ * PaX: __kuser_cmpxchg64 emulation
++ */
++ // TODO
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fa0UL) {
++ /*
++ * PaX: __kuser_memory_barrier emulation
++ */
++ // dmb(); implied by the exception
++ regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++ if (regs->ARM_lr & 1) {
++ regs->ARM_cpsr |= PSR_T_BIT;
++ regs->ARM_pc &= ~0x1U;
++ } else
++ regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++ return;
++ }
++ if (pc == 0xffff0fc0UL) {
++ /*
++ * PaX: __kuser_cmpxchg emulation
++ */
++ // TODO
++ //long new;
++ //int op;
++
++ //op = FUTEX_OP_SET << 28;
++ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
++ //regs->ARM_r0 = old != new;
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fe0UL) {
++ /*
++ * PaX: __kuser_get_tls emulation
++ */
++ regs->ARM_r0 = current_thread_info()->tp_value[0];
++ regs->ARM_pc = regs->ARM_lr;
++#ifdef CONFIG_ARM_THUMB
++ if (regs->ARM_lr & 1) {
++ regs->ARM_cpsr |= PSR_T_BIT;
++ regs->ARM_pc &= ~0x1U;
++ } else
++ regs->ARM_cpsr &= ~PSR_T_BIT;
++#endif
++ return;
++ }
++ }
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ goto die;
++ }
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++ unsigned short bkpt;
++
++ if (!probe_kernel_address((const unsigned short *)pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
++ unsigned int bkpt;
++
++ if (!probe_kernel_address((const unsigned int *)pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_error(regs, NULL);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index afc1f84..b1daab5 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -3,6 +3,7 @@
+
+ /*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
++ * Set when the FSR value is from an instruction fault.
+ */
+ #define FSR_LNX_PF (1 << 31)
+ #define FSR_WRITE (1 << 11)
+@@ -26,6 +27,17 @@ static inline int fsr_fs(unsigned int fsr)
+ }
+ #endif
+
++/* valid for LPAE and !LPAE */
++static inline int is_xn_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0x3c) == 0xc);
++}
++
++static inline int is_domain_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0xD) == 0x9);
++}
++
+ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ void early_abt_enable(void);
+
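A worked decode of the two predicates, as one reading of the masks (verify against the ARM ARM fault-status encodings):

/*
 * is_xn_fault():     (fs & 0x3c) == 0x0c  ->  FS codes 0xC..0xF
 *                    (LPAE: permission fault at lookup level 0..3)
 * is_domain_fault(): (fs & 0x0d) == 0x09  ->  FS codes 0x9 and 0xB
 *                    (short descriptors: domain fault, section and page)
 */
static inline void fsr_decode_selftest(void)
{
	BUILD_BUG_ON(!((0x9 & 0x0d) == 0x09));	/* domain fault, section */
	BUILD_BUG_ON(!((0xb & 0x0d) == 0x09));	/* domain fault, page */
	BUILD_BUG_ON(!((0xd & 0x3c) == 0x0c));	/* permission fault, section */
}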
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 370581a..b985cc1 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -747,7 +747,46 @@ void free_tcmmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+ extern char __tcm_start, __tcm_end;
++#endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long addr;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int cpu_arch = cpu_architecture();
++ unsigned int cr = get_cr();
++
++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
++ /* make pages tables, etc before .text NX */
++		/* make page tables, etc. before .text NX */
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make init NX */
++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make kernel code/rodata RX */
++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_ARM_LPAE
++ __section_update(pmd, addr, PMD_SECT_RDONLY);
++#else
++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
++#endif
++ }
++ }
++#endif
++
++#ifdef CONFIG_HAVE_TCM
+ poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+ free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+ #endif
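The KERNEXEC block above walks pgd/pud/pmd for each section three times with different flags; a sketch of the loop factored out, assuming __section_update() (introduced elsewhere in this patch) applies the given flags to a section-mapped PMD:

static void __init update_sections(unsigned long start, unsigned long end,
				   pmdval_t flags)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += SECTION_SIZE) {
		pgd_t *pgd = pgd_offset_k(addr);
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		__section_update(pmd, addr, flags);
	}
}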
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index ff0eed2..f17f1c9 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -411,9 +411,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+ unsigned int mtype;
+
+ if (cached)
+- mtype = MT_MEMORY_RWX;
++ mtype = MT_MEMORY_RX;
+ else
+- mtype = MT_MEMORY_RWX_NONCACHED;
++ mtype = MT_MEMORY_RX_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 66353ca..8aad9f8 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -99,19 +103,21 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_align)
+@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ else
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -182,14 +192,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
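The #ifdef'd bare `if` statements above are easy to misread: when PAX_RANDMMAP is active for the task they guard the statement that follows, so the stock address hint and stock randomization are skipped and PaX's own delta_mmap/delta_stack adjustments take over. Flattened for readability (pax_active is a hypothetical name, and mm->pax_flags only exists with the config on):

bool pax_active = IS_ENABLED(CONFIG_PAX_RANDMMAP) &&
		  (mm->pax_flags & MF_PAX_RANDMMAP);

if (!pax_active && (current->flags & PF_RANDOMIZE))
	random_factor = arch_mmap_rnd();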
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 4001dd1..c6dce7b 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -243,6 +243,14 @@ __setup("noalign", noalign_setup);
+ #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
+ #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define L_PTE_KERNEXEC L_PTE_RDONLY
++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
++#else
++#define L_PTE_KERNEXEC L_PTE_DIRTY
++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
++#endif
++
+ static struct mem_type mem_types[] __ro_after_init = {
+ [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+@@ -272,19 +280,19 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PROT_SECT_DEVICE,
+ .domain = DOMAIN_IO,
+ },
+- [MT_UNCACHED] = {
++ [MT_UNCACHED_RW] = {
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_IO,
+ },
+- [MT_CACHECLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++ [MT_CACHECLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #ifndef CONFIG_ARM_LPAE
+- [MT_MINICLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
++ [MT_MINICLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #endif
+@@ -300,7 +308,7 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_l1 = PMD_TYPE_TABLE,
+ .domain = DOMAIN_VECTORS,
+ },
+- [MT_MEMORY_RWX] = {
++ [__MT_MEMORY_RWX] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+@@ -313,17 +321,30 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_ROM] = {
+- .prot_sect = PMD_TYPE_SECT,
++ [MT_MEMORY_RX] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
++ [MT_ROM_RX] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_NONCACHED] = {
++ [MT_MEMORY_RW_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
++ [MT_MEMORY_RX_NONCACHED] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
++ L_PTE_MT_BUFFERABLE,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
+ [MT_MEMORY_RW_DTCM] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+@@ -331,9 +352,10 @@ static struct mem_type mem_types[] __ro_after_init = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_ITCM] = {
+- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++ [MT_MEMORY_RX_ITCM] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RW_SO] = {
+@@ -586,9 +608,14 @@ static void __init build_mem_type_table(void)
+ * Mark cache clean areas and XIP ROM read only
+ * from SVC mode and no access from userspace.
+ */
+- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#ifdef CONFIG_PAX_KERNEXEC
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#endif
++ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ #endif
+
+ /*
+@@ -605,13 +632,17 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+ }
+
+@@ -622,15 +653,20 @@ static void __init build_mem_type_table(void)
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /* Non-cacheable Normal is XCB = 001 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_BUFFERED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_BUFFERED;
+ } else {
+ /* For both ARMv6 and non-TEX-remapping ARMv7 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_TEX(1);
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_TEX(1);
+ }
+ } else {
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -651,6 +687,8 @@ static void __init build_mem_type_table(void)
+ user_pgprot |= PTE_EXT_PXN;
+ #endif
+
++ user_pgprot |= __supported_pte_mask;
++
+ for (i = 0; i < 16; i++) {
+ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+@@ -668,21 +706,24 @@ static void __init build_mem_type_table(void)
+
+ mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
+- mem_types[MT_ROM].prot_sect |= cp->pmd;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
+
+ switch (cp->pmd) {
+ case PMD_SECT_WT:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
+ break;
+ case PMD_SECT_WB:
+ case PMD_SECT_WBWA:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
+ break;
+ }
+ pr_info("Memory policy: %sData cache %s\n",
+@@ -959,7 +1000,7 @@ static void __init create_mapping(struct map_desc *md)
+ return;
+ }
+
+- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
++ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+@@ -1320,18 +1361,15 @@ void __init arm_mm_memblock_reserve(void)
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
+ */
++
++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
++
+ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ {
+ struct map_desc map;
+ unsigned long addr;
+- void *vectors;
+
+- /*
+- * Allocate the vector page early.
+- */
+- vectors = early_alloc(PAGE_SIZE * 2);
+-
+- early_trap_init(vectors);
++ early_trap_init(&vectors);
+
+ /*
+ * Clear page table except top pmd used by early fixmaps
+@@ -1347,7 +1385,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+ map.virtual = MODULES_VADDR;
+ map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+- map.type = MT_ROM;
++ map.type = MT_ROM_RX;
+ create_mapping(&map);
+ #endif
+
+@@ -1358,14 +1396,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+ map.virtual = FLUSH_BASE;
+ map.length = SZ_1M;
+- map.type = MT_CACHECLEAN;
++ map.type = MT_CACHECLEAN_RO;
+ create_mapping(&map);
+ #endif
+ #ifdef FLUSH_BASE_MINICACHE
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+ map.virtual = FLUSH_BASE_MINICACHE;
+ map.length = SZ_1M;
+- map.type = MT_MINICLEAN;
++ map.type = MT_MINICLEAN_RO;
+ create_mapping(&map);
+ #endif
+
+@@ -1374,7 +1412,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
+ */
+- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
+ map.virtual = 0xffff0000;
+ map.length = PAGE_SIZE;
+ #ifdef CONFIG_KUSER_HELPERS
+@@ -1437,12 +1475,14 @@ static void __init kmap_init(void)
+ static void __init map_lowmem(void)
+ {
+ struct memblock_region *reg;
++#ifndef CONFIG_PAX_KERNEXEC
+ #ifdef CONFIG_XIP_KERNEL
+ phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
+ #else
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ #endif
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++#endif
+
+ /* Map all the lowmem memory banks. */
+ for_each_memblock(memory, reg) {
+@@ -1458,11 +1498,48 @@ static void __init map_lowmem(void)
+ if (start >= end)
+ break;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ map.pfn = __phys_to_pfn(start);
++ map.virtual = __phys_to_virt(start);
++ map.length = end - start;
++
++ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
++ struct map_desc kernel;
++ struct map_desc initmap;
++
++ /* when freeing initmem we will make this RW */
++ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
++ initmap.virtual = (unsigned long)__init_begin;
++ initmap.length = _sdata - __init_begin;
++ initmap.type = __MT_MEMORY_RWX;
++ create_mapping(&initmap);
++
++ /* when freeing initmem we will make this RX */
++ kernel.pfn = __phys_to_pfn(__pa(_stext));
++ kernel.virtual = (unsigned long)_stext;
++ kernel.length = __init_begin - _stext;
++ kernel.type = __MT_MEMORY_RWX;
++ create_mapping(&kernel);
++
++ if (map.virtual < (unsigned long)_stext) {
++ map.length = (unsigned long)_stext - map.virtual;
++ map.type = __MT_MEMORY_RWX;
++ create_mapping(&map);
++ }
++
++ map.pfn = __phys_to_pfn(__pa(_sdata));
++ map.virtual = (unsigned long)_sdata;
++ map.length = end - __pa(_sdata);
++ }
++
++ map.type = MT_MEMORY_RW;
++ create_mapping(&map);
++#else
+ if (end < kernel_x_start) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+@@ -1486,7 +1563,7 @@ static void __init map_lowmem(void)
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+
+@@ -1499,6 +1576,7 @@ static void __init map_lowmem(void)
+ create_mapping(&map);
+ }
+ }
++#endif
+ }
+ }
+
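Two things in the mmu.c hunk deserve a callout beyond the renames. First, every former *_RWX memory type except the transitional __MT_MEMORY_RWX (kept only until free_tcmmem() tightens the kernel image) is split into explicit RW and RX variants. Second, the vector page stops being an early_alloc() buffer and becomes a build-time, page-aligned, read-only object, so it can be mapped non-writable from the start; a sketch of that change:

static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);

static void __init devicemaps_init_sketch(void)
{
	/* was: early_trap_init(early_alloc(PAGE_SIZE * 2)); */
	early_trap_init(&vectors);
}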
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index 93d0b6d..2db6d99 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -20,6 +20,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/hwcap.h>
+ #include <asm/opcodes.h>
++#include <asm/pgtable.h>
+
+ #include "bpf_jit_32.h"
+
+@@ -72,54 +73,38 @@ struct jit_ctx {
+ #endif
+ };
+
++#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
++int bpf_jit_enable __read_only;
++#else
+ int bpf_jit_enable __read_mostly;
++#endif
+
+-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+- unsigned int size)
+-{
+- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+-
+- if (!ptr)
+- return -EFAULT;
+- memcpy(ret, ptr, size);
+- return 0;
+-}
+-
+-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+ {
+ u8 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 1);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+ {
+ u16 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 2);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+ {
+ u32 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 4);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -191,8 +176,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ {
+ u32 *ptr;
+ /* We are guaranteed to have aligned memory. */
++ pax_open_kernel();
+ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+ *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
++ pax_close_kernel();
+ }
+
+ static void build_prologue(struct jit_ctx *ctx)
+@@ -554,6 +541,9 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_LD | BPF_B | BPF_ABS:
+ load_order = 0;
+ load:
++ /* the interpreter will deal with the negative K */
++ if ((int)k < 0)
++ return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -568,18 +558,6 @@ static int build_body(struct jit_ctx *ctx)
+ condt = ARM_COND_HI;
+ }
+
+- /*
+- * test for negative offset, only if we are
+- * currently scheduled to take the fast
+- * path. this will update the flags so that
+- * the slowpath instruction are ignored if the
+- * offset is negative.
+- *
+- * for loard_order == 0 the HI condition will
+- * make loads at offset 0 take the slow path too.
+- */
+- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+-
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
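Two independent hardenings above: negative constant offsets are now rejected with -ENOTSUPP so those filters fall back to the interpreter, and under GRKERNSEC_BPF_HARDEN the bpf_jit_enable knob becomes __read_only, immune to stray kernel writes. Legitimate updates would then have to use the kernel-write window seen elsewhere in this patch; a sketch of that pattern (not the actual sysctl handler):

static void set_bpf_jit(int on)
{
	pax_open_kernel();
	bpf_jit_enable = on;
	pax_close_kernel();
}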
+diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
+index 8151bde..9be301f 100644
+--- a/arch/arm/plat-iop/setup.c
++++ b/arch/arm/plat-iop/setup.c
+@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
+ .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
+ .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
+ .length = IOP3XX_PERIPHERAL_SIZE,
+- .type = MT_UNCACHED,
++ .type = MT_UNCACHED_RW,
+ },
+ };
+
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index a5bc92d..0bb4730 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
+ * Looks like we need to preserve some bootloader code at the
+ * beginning of SRAM for jumping to flash for reboot to work...
+ */
++ pax_open_kernel();
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
++ pax_close_kernel();
+ }
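This is the flip side of the earlier ioremap change (__arm_ioremap_exec now hands out MT_MEMORY_RX): code that still has to write through such a mapping brackets the write with the kernel-open window. The same pattern as a hypothetical driver would use it (names invented for illustration):

static void load_sram_firmware(void __iomem *sram_base,
			       const void *fw_image, size_t fw_len)
{
	pax_open_kernel();
	memcpy_toio(sram_base, fw_image, fw_len);
	pax_close_kernel();
}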
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index a4ec240..96faf9b 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -485,6 +485,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -493,6 +494,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+ }
++#endif
+
+ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ {
+@@ -605,10 +607,12 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ return 0;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ #ifdef CONFIG_THUMB2_KERNEL
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index cf57a77..ab33bd2 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -906,6 +906,7 @@ config RELOCATABLE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
++ depends on BROKEN_SECURITY
+ select ARM64_MODULE_PLTS if MODULES
+ select RELOCATABLE
+ help
+diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
+index b661fe7..6d124fc 100644
+--- a/arch/arm64/Kconfig.debug
++++ b/arch/arm64/Kconfig.debug
+@@ -6,6 +6,7 @@ config ARM64_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index aefda98..2937874 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -29,7 +29,7 @@ struct sha1_ce_state {
+ u32 finalize;
+ };
+
+-asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
++asmlinkage void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
+ int blocks);
+
+ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+@@ -39,8 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
+
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ kernel_neon_end();
+
+ return 0;
+@@ -64,10 +63,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
+ sctx->finalize = finalize;
+
+ kernel_neon_begin_partial(16);
+- sha1_base_do_update(desc, data, len,
+- (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_update(desc, data, len, sha1_ce_transform);
+ if (!finalize)
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_finalize(desc, sha1_ce_transform);
+ kernel_neon_end();
+ return sha1_base_finish(desc, out);
+ }
+@@ -78,7 +76,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
+
+ sctx->finalize = 0;
+ kernel_neon_begin_partial(16);
+- sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
++ sha1_base_do_finalize(desc, sha1_ce_transform);
+ kernel_neon_end();
+ return sha1_base_finish(desc, out);
+ }
+diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
+index c0235e0..86eb684 100644
+--- a/arch/arm64/include/asm/atomic.h
++++ b/arch/arm64/include/asm/atomic.h
+@@ -57,11 +57,13 @@
+ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_relaxed
+ #define atomic_add_return_acquire atomic_add_return_acquire
+ #define atomic_add_return_release atomic_add_return_release
+ #define atomic_add_return atomic_add_return
+
+ #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v))
+ #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+ #define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
+@@ -128,6 +130,8 @@
+ #define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
+ #define atomic_andnot atomic_andnot
+
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_relaxed(1, (v))
++
+ /*
+ * 64-bit atomic operations.
+ */
+@@ -206,5 +210,16 @@
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++#define atomic64_xchg_unchecked(v, n) atomic64_xchg((v), (n))
++
+ #endif
+ #endif
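On arm64 the _unchecked operations are plain aliases with no REFCOUNT instrumentation behind them, but defining the names lets arch-independent code annotate deliberately wrapping counters uniformly. Usage sketch with a hypothetical statistics counter (assumes the atomic64_unchecked_t typedef this patch adds in the generic headers):

static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);

static void account_rx(size_t len)
{
	atomic64_add_unchecked(len, &rx_bytes);	/* wrapping is fine here */
}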
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5082b30..9ef38c2 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -16,10 +16,14 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ #include <asm/cachetype.h>
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 7
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
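The _AC(1,UL) rewrite does two jobs: in C, L1_CACHE_BYTES becomes an unsigned long so masks derived from it do not sign-extend in 64-bit arithmetic, and in assembly _AC() drops the UL suffix the assembler would reject. Illustration:

#include <linux/const.h>

#define L1_CACHE_SHIFT	7
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 128, unsigned long */

/* an alignment mask that stays well-typed in 64-bit expressions: */
#define L1_CACHE_MASK	(~(L1_CACHE_BYTES - 1))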
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 5394c84..05e5a95 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -123,16 +123,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
++ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
+ break;
+ case 2:
+- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
++ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
+ break;
+ case 4:
+- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
++ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
+ break;
+ case 8:
+- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
++ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
+ break;
+ default:
+ BUILD_BUG();
+diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
+index d25f4f1..61d52da 100644
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -51,6 +51,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #else
+ static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+ {
+@@ -80,6 +85,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ {
+ __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+ }
++
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ pgd_populate(mm, pgd, pud);
++}
+ #else
+ static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+ {
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index ffbb9a5..d8b49ff 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -23,6 +23,9 @@
+ #include <asm/pgtable-hwdef.h>
+ #include <asm/pgtable-prot.h>
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * VMALLOC range.
+ *
+@@ -728,6 +731,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ #define kc_vaddr_to_offset(v) ((v) & ~VA_START)
+ #define kc_offset_to_vaddr(o) ((o) | VA_START)
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* __ASM_PGTABLE_H */
+diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
+index 2eb714c..3a10471 100644
+--- a/arch/arm64/include/asm/string.h
++++ b/arch/arm64/include/asm/string.h
+@@ -17,40 +17,40 @@
+ #define __ASM_STRING_H
+
+ #define __HAVE_ARCH_STRRCHR
+-extern char *strrchr(const char *, int c);
++extern char *strrchr(const char *, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCHR
+-extern char *strchr(const char *, int c);
++extern char *strchr(const char *, int c) __nocapture(-1);
+
+ #define __HAVE_ARCH_STRCMP
+-extern int strcmp(const char *, const char *);
++extern int strcmp(const char *, const char *) __nocapture();
+
+ #define __HAVE_ARCH_STRNCMP
+-extern int strncmp(const char *, const char *, __kernel_size_t);
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
+
+ #define __HAVE_ARCH_STRLEN
+-extern __kernel_size_t strlen(const char *);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
+
+ #define __HAVE_ARCH_STRNLEN
+-extern __kernel_size_t strnlen(const char *, __kernel_size_t);
++extern __kernel_size_t strnlen(const char *, __kernel_size_t) __nocapture(1);
+
+ #define __HAVE_ARCH_MEMCPY
+-extern void *memcpy(void *, const void *, __kernel_size_t);
+-extern void *__memcpy(void *, const void *, __kernel_size_t);
++extern void *memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memcpy(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMMOVE
+-extern void *memmove(void *, const void *, __kernel_size_t);
+-extern void *__memmove(void *, const void *, __kernel_size_t);
++extern void *memmove(void *, const void *, __kernel_size_t) __nocapture(2);
++extern void *__memmove(void *, const void *, __kernel_size_t) __nocapture(2);
+
+ #define __HAVE_ARCH_MEMCHR
+-extern void *memchr(const void *, int, __kernel_size_t);
++extern void *memchr(const void *, int, __kernel_size_t) __nocapture(-1);
+
+ #define __HAVE_ARCH_MEMSET
+ extern void *memset(void *, int, __kernel_size_t);
+ extern void *__memset(void *, int, __kernel_size_t);
+
+ #define __HAVE_ARCH_MEMCMP
+-extern int memcmp(const void *, const void *, size_t);
++extern int memcmp(const void *, const void *, size_t) __nocapture(1, 2);
+
+
+ #if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
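__nocapture() feeds a pointer-escape analysis in the gcc plugins this patch ships. From the pattern above, an observation rather than a documented contract: the indices name parameters whose pointers the function never stores past the call, and a negative index appears where the return value aliases that parameter (strchr, strrchr and memchr all return into their first argument). A hypothetical helper annotated in the same style:

static const char *skip_spaces_nc(const char *s) __nocapture(-1);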
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 55d0adb..b986918 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -110,6 +110,7 @@ static inline void set_fs(mm_segment_t fs)
+ */
+ #define untagged_addr(addr) sign_extend64(addr, 55)
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __range_ok(addr, size)
+ #define user_addr_max get_fs
+
+@@ -279,6 +280,9 @@ static inline unsigned long __must_check __copy_from_user(void *to, const void _
+
+ static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
+@@ -287,6 +291,10 @@ static inline unsigned long __must_check __copy_to_user(void __user *to, const v
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ unsigned long res = n;
++
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_write(to, n);
+
+ if (access_ok(VERIFY_READ, from, n)) {
+@@ -300,6 +308,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ kasan_check_read(from, n);
+
+ if (access_ok(VERIFY_WRITE, to, n)) {
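The added (long)n < 0 guards blunt a recurring bug class: a signed length computation underflows and is then passed as the unsigned copy size. With the guard, the copy returns n untouched, so the usual nonzero-means-failure check fires instead of a multi-gigabyte copy. A sketch of the failure mode (struct and names hypothetical):

struct pkt_hdr { u32 total_len; };

static int read_payload(void *buf, const void __user *ubuf,
			const struct pkt_hdr *hdr)
{
	long len = hdr->total_len - sizeof(*hdr);	/* may underflow */

	if (copy_from_user(buf, ubuf, len))	/* len < 0 -> fails cleanly */
		return -EFAULT;
	return 0;
}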
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index d55a7b0..d8dbd8a 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -198,7 +198,7 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
+ static int create_safe_exec_page(void *src_start, size_t length,
+ unsigned long dst_addr,
+ phys_addr_t *phys_dst_addr,
+- void *(*allocator)(gfp_t mask),
++ unsigned long (*allocator)(gfp_t mask),
+ gfp_t mask)
+ {
+ int rc = 0;
+@@ -206,7 +206,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+- unsigned long dst = (unsigned long)allocator(mask);
++ unsigned long dst = allocator(mask);
+
+ if (!dst) {
+ rc = -ENOMEM;
+@@ -216,9 +216,9 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ memcpy((void *)dst, src_start, length);
+ flush_icache_range(dst, dst + length);
+
+- pgd = pgd_offset_raw(allocator(mask), dst_addr);
++ pgd = pgd_offset_raw((pgd_t *)allocator(mask), dst_addr);
+ if (pgd_none(*pgd)) {
+- pud = allocator(mask);
++ pud = (pud_t *)allocator(mask);
+ if (!pud) {
+ rc = -ENOMEM;
+ goto out;
+@@ -228,7 +228,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+
+ pud = pud_offset(pgd, dst_addr);
+ if (pud_none(*pud)) {
+- pmd = allocator(mask);
++ pmd = (pmd_t *)allocator(mask);
+ if (!pmd) {
+ rc = -ENOMEM;
+ goto out;
+@@ -238,7 +238,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+
+ pmd = pmd_offset(pud, dst_addr);
+ if (pmd_none(*pmd)) {
+- pte = allocator(mask);
++ pte = (pte_t *)allocator(mask);
+ if (!pte) {
+ rc = -ENOMEM;
+ goto out;
+@@ -510,7 +510,7 @@ int swsusp_arch_resume(void)
+ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+ (unsigned long)hibernate_exit,
+ &phys_hibernate_exit,
+- (void *)get_safe_page, GFP_ATOMIC);
++ get_safe_page, GFP_ATOMIC);
+ if (rc) {
+ pr_err("Failed to create safe executable page for hibernate_exit code.");
+ goto out;
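The allocator-callback retyping exists because get_safe_page() really returns unsigned long; the old code called it through a casted void *(*)(gfp_t) pointer, and such signature mismatches are exactly what RAP-style call type checking rejects, so the callback type is corrected instead of casted. For reference:

/* include/linux/suspend.h */
extern unsigned long get_safe_page(gfp_t gfp_mask);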
+diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
+index f5077ea..46b4664 100644
+--- a/arch/arm64/kernel/probes/kprobes.c
++++ b/arch/arm64/kernel/probes/kprobes.c
+@@ -639,6 +639,7 @@ void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
+ return (void *)orig_ret_address;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -652,6 +653,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return 0;
+ }
++#endif
+
+ int __init arch_init_kprobes(void)
+ {
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 01753cd..b65d17a 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -64,7 +64,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
+ /*
+ * Function pointers to optional machine specific functions
+ */
+-void (*pm_power_off)(void);
++void (* pm_power_off)(void);
+ EXPORT_SYMBOL_GPL(pm_power_off);
+
+ void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+@@ -110,7 +110,7 @@ void machine_shutdown(void)
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
+-void machine_halt(void)
++void __noreturn machine_halt(void)
+ {
+ local_irq_disable();
+ smp_send_stop();
+@@ -123,12 +123,13 @@ void machine_halt(void)
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
+-void machine_power_off(void)
++void __noreturn machine_power_off(void)
+ {
+ local_irq_disable();
+ smp_send_stop();
+ if (pm_power_off)
+ pm_power_off();
++	while (1);
+ }
+
+ /*
+@@ -140,7 +141,7 @@ void machine_power_off(void)
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
+-void machine_restart(char *cmd)
++void __noreturn machine_restart(char *cmd)
+ {
+ /* Disable interrupts first */
+ local_irq_disable();
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index c2efddf..c58e0a2 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -95,8 +95,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
+ struct pt_regs *irq_args;
+ unsigned long orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);
+
+- if (object_is_on_stack((void *)orig_sp) &&
+- object_is_on_stack((void *)frame->fp)) {
++ if (object_starts_on_stack((void *)orig_sp) &&
++ object_starts_on_stack((void *)frame->fp)) {
+ frame->sp = orig_sp;
+
+ /* orig_sp is the saved pt_regs, find the elr */
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 11e5eae..d8cdfa7 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -547,7 +547,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
+ __show_regs(regs);
+ }
+
+- return sys_ni_syscall();
++ return -ENOSYS;
+ }
+
+ static const char *esr_class_str[] = {
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 0388ece..87c8df1 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index 479330b..53717a8 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define __ASM_AVR32_KMAP_TYPES_H
+
+ #ifdef CONFIG_DEBUG_HIGHMEM
+-# define KM_TYPE_NR 29
++# define KM_TYPE_NR 30
+ #else
+-# define KM_TYPE_NR 14
++# define KM_TYPE_NR 15
+ #endif
+
+ #endif /* __ASM_AVR32_KMAP_TYPES_H */
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index b3977e9..4230c51a 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -178,6 +195,16 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee..15b6f8d 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
+ config DEBUG_MMRS
+ tristate "Generate Blackfin MMR tree"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Create a tree of Blackfin MMRs via the debugfs tree. If
+ you enable this, you will find all MMRs laid out in the
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
+ #include <linux/linkage.h> /* for asmlinkage */
+
+ /*
+@@ -14,7 +15,7 @@
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 7caf25d..ee65ac5 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 1c2a5e2..2579e5f 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -146,6 +146,16 @@ static inline void atomic64_dec(atomic64_t *v)
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401df9 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index 43901f2..0d8b865 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -2,6 +2,6 @@
+ #ifndef _ASM_KMAP_TYPES_H
+ #define _ASM_KMAP_TYPES_H
+
+-#define KM_TYPE_NR 17
++#define KM_TYPE_NR 18
+
+ #endif
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 836f1470..4cf23f5 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ goto success;
+ }
+
+@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ info.high_limit = (current->mm->start_stack - 0x00200000);
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto success;
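
Both hunks replace the open-coded `!vma || addr + len <= vma->vm_start` test with check_heap_stack_gap() and thread gr_rand_threadstack_offset() through vm_unmapped_area(), so new mappings keep a randomized distance from thread stacks. The helper is defined elsewhere in the patch; conceptually it behaves like this sketch (the sysctl name and exact bound handling are assumptions, and the real version is more careful about wraparound):

	/* Conceptual sketch only; the authoritative helper lives in the
	 * patch's include/linux/mm.h changes. */
	static bool gap_ok(const struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len,
			   unsigned long offset)
	{
		if (!vma)
			return true;			/* nothing above us */
		if (addr + len + offset > vma->vm_start)
			return false;			/* overlaps or too close */
		if (vma->vm_flags & VM_GROWSDOWN)	/* keep a guard gap */
			return addr + len + sysctl_heap_stack_gap <= vma->vm_start;
		return true;
	}
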
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 69952c18..4fa2908 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 18ca6a9..77b0e0d 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -519,6 +519,7 @@ config KEXEC
+ bool "kexec system call"
+ depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
+index c100d78..c44d46d 100644
+--- a/arch/ia64/Makefile
++++ b/arch/ia64/Makefile
+@@ -98,5 +98,6 @@ endef
+ archprepare: make_nr_irqs_h
+ PHONY += make_nr_irqs_h
+
++GCC_PLUGINS_make_nr_irqs_h := n
+ make_nr_irqs_h:
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 65d4bb2..8b2e661 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -323,4 +323,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_IA64_ATOMIC_H */
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 5a83c5c..4d7f553 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
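
This PAX_ELF_ET_DYN_BASE / PAX_DELTA_*_LEN trio recurs for each architecture in the patch: the base is where ET_DYN executables are loaded under PaX ASLR, and the two lengths are the number of random bits folded into the mmap and stack bases. The consuming side in the patch's fs/binfmt_elf.c changes looks roughly like this sketch (the helper name follows PaX convention and is an assumption here):

	#ifdef CONFIG_PAX_ASLR
		mm->delta_mmap = (pax_get_random_long() &
				  ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
		mm->delta_stack = (pax_get_random_long() &
				   ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
	#endif

So on 64-bit ia64 the mmap base receives 3*PAGE_SHIFT - 13 bits of entropy, applied page-aligned.
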
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index f5e70e9..624fad5 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 9f3ed9e..c99b418 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -139,6 +139,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index ca9e761..40dffaf 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -73,7 +73,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index bfe1319..da0014b 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -70,6 +70,7 @@
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
+
+ /*
+@@ -241,17 +242,23 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
+ check_object_size(from, count, true);
+
+- return __copy_user(to, (__force void __user *) from, count);
++ return __copy_user(to, (void __force_user *) from, count);
+ }
+
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
+ check_object_size(to, count, false);
+
+- return __copy_user((__force void __user *) to, from, count);
++ return __copy_user((void __force_user *) to, from, count);
+ }
+
+ #define __copy_to_user_inatomic __copy_to_user
+@@ -260,11 +267,11 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) { \
+- check_object_size(__cu_from, __cu_len, true); \
+- __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
++ check_object_size(__cu_from, __cu_len, true); \
++ __cu_len = __copy_user(__cu_to, (void __force_user *) __cu_from, __cu_len); \
+ } \
+ __cu_len; \
+ })
+@@ -272,10 +279,10 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- check_object_size(to, n, false);
+- if (likely(__access_ok(from, n, get_fs())))
+- n = __copy_user((__force void __user *) to, from, n);
+- else
++ if (likely(__access_ok(from, n, get_fs()))) {
++ check_object_size(to, n, false);
++ n = __copy_user((void __force_user *) to, from, n);
++ } else if ((long)n > 0)
+ memset(to, 0, n);
+ return n;
+ }
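
Three hardenings land in this uaccess.h hunk: sizes above INT_MAX are rejected up front (catching negative lengths sign-extended into huge unsigned values), check_object_size() runs only after the pointer has passed __access_ok(), and the failure path of copy_from_user() still zeroes the destination, but only for plausible sizes. A generic sketch of the resulting shape (not ia64-exact):

	static inline unsigned long
	copy_from_user_hardened(void *to, const void __user *from, unsigned long n)
	{
		if (likely(__access_ok(from, n, get_fs()))) {
			check_object_size(to, n, false); /* USERCOPY bounds check */
			n = __copy_user((void __force_user *)to, from, n);
		} else if ((long)n > 0) {
			memset(to, 0, n);	/* never leave the buffer uninitialized */
		}
		return n;
	}
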
+diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
+index c7c5144..7e31461 100644
+--- a/arch/ia64/kernel/kprobes.c
++++ b/arch/ia64/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -507,6 +508,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
+ }
++#endif
+
+ /* Check the instruction in the slot is break */
+ static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
+@@ -1119,6 +1121,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr ==
+@@ -1127,3 +1130,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 6ab0ae7..88f1b60 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
++ return within_module_init(addr, mod);
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
++ return within_module_core(addr, mod);
+ }
+
+ static inline int
+@@ -676,6 +676,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+
+ case RV_BDREL:
+ val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
++ if (within_module_rx(val, &mod->init_layout))
++ val -= mod->init_layout.base_rx;
++ else if (within_module_rw(val, &mod->init_layout))
++ val -= mod->init_layout.base_rw;
++ else if (within_module_rx(val, &mod->core_layout))
++ val -= mod->core_layout.base_rx;
++ else if (within_module_rw(val, &mod->core_layout))
++ val -= mod->core_layout.base_rw;
+ break;
+
+ case RV_LTV:
+@@ -810,15 +818,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_layout.size > MAX_LTOFF)
++ if (mod->core_layout.size_rx + mod->core_layout.size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_layout.size - MAX_LTOFF / 2;
++ gp = mod->core_layout.size_rx + mod->core_layout.size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_layout.size / 2;
+- gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
++ gp = (mod->core_layout.size_rx + mod->core_layout.size_rw) / 2;
++ gp = (uint64_t) mod->core_layout.base_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
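
These module.c hunks are fallout from PaX splitting `struct module_layout` into separate read-execute and read-write regions, so a single base/size pair no longer describes a loaded module and the global pointer must be placed relative to both halves. The accessors used above imply a shape along these lines (inferred from the field uses, not the verbatim struct):

	/* Inferred sketch; see include/linux/module.h in this patch for the
	 * authoritative definition. */
	struct module_layout {
		void *base_rx;		/* code: mapped read-execute */
		void *base_rw;		/* data: mapped read-write, NX */
		unsigned int size_rx;
		unsigned int size_rw;
		/* ... */
	};

within_module_rx()/within_module_rw() then test an address against the matching half, which is why the RV_BDREL relocation has to pick the correct base per address.
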
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index c39c3cd..3c77738 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+ .notifier_call = palinfo_cpu_callback,
+ .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 41e33f8..65180b2a 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ unsigned long align_mask = 0;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index f89d20c..410a1b1 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -172,7 +172,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index fa6ad95..b46bd89 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ # define VM_READ_BIT 0
+ # define VM_WRITE_BIT 1
+ # define VM_EXEC_BIT 2
+@@ -151,8 +168,21 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+ if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+ goto bad_area;
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index 85de86d..db7f6b8 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 1841ef6..74d8330 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+
+ return 0;
+ }
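
Swapping the literal `__P101` for vm_get_page_prot(gate_vma.vm_flags) removes a hand-maintained index into protection_map[]: the protection is derived from the same flags the vma actually carries, which is what lets PaX adjust effective protections in one place. In upstream terms the two spellings coincide, modulo arch-specific bits:

	/* what __P101 indexed by hand: private read+exec */
	pgprot_t legacy  = protection_map[VM_READ | VM_EXEC];
	/* equivalent, plus arch_vm_get_page_prot() bits, and future-proof */
	pgprot_t derived = vm_get_page_prot(VM_READ | VM_EXEC);
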
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee98..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
+index 4e5aa2f..172c469 100644
+--- a/arch/m68k/kernel/time.c
++++ b/arch/m68k/kernel/time.c
+@@ -107,6 +107,7 @@ static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+
+ switch (cmd) {
+ case RTC_PLL_GET:
++ memset(&pll, 0, sizeof(pll));
+ if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
+ return -EINVAL;
+ return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
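
The one-line memset is a classic stack infoleak fix: pll lives on the kernel stack, and copy_to_user() ships every byte of it, including compiler padding and any fields a mach_get_rtc_pll handler leaves untouched. Zeroing the struct first bounds what can escape. The bug class in miniature:

	struct rtc_pll_info pll;		/* uninitialized kernel stack */

	memset(&pll, 0, sizeof(pll));		/* the fix: clear padding and gaps */
	if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
		return -EINVAL;
	return copy_to_user(argp, &pll, sizeof(pll)) ? -EFAULT : 0;
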
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index db1b7da..8e13684 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & HUGEPT_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
+index 5c3f688..f8cc1b3 100644
+--- a/arch/mips/Kbuild
++++ b/arch/mips/Kbuild
+@@ -1,7 +1,7 @@
+ # Fail on warnings - also for files referenced in subdirs
+ # -Werror can be disabled for specific files using:
+ # CFLAGS_<file.o> := -Wno-error
+-subdir-ccflags-y := -Werror
++# subdir-ccflags-y := -Werror
+
+ # platform specific definitions
+ include arch/mips/Kbuild.platforms
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 5a4f2eb..93c4de6 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -50,6 +50,7 @@ config MIPS
+ select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
+ select VIRT_TO_BUS
++ select HAVE_GCC_PLUGINS
+ select MODULES_USE_ELF_REL if MODULES
+ select MODULES_USE_ELF_RELA if MODULES && 64BIT
+ select CLONE_BACKWARDS
+@@ -2596,7 +2597,7 @@ config RELOCATION_TABLE_SIZE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+- depends on RELOCATABLE
++ depends on RELOCATABLE && BROKEN_SECURITY
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+@@ -2812,6 +2813,7 @@ source "kernel/Kconfig.preempt"
+ config KEXEC
+ bool "Kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 0ab176b..c4469a4 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -22,15 +22,39 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
++#ifdef CONFIG_64BIT
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .dword " #from ", " #to"\n" \
++" .previous\n"
++#else
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .word " #from ", " #to"\n" \
++" .previous\n"
++#endif
++
+ /*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+-#define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read(const atomic_t *v)
++{
++ return READ_ONCE(v->counter);
++}
++
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ /*
+ * atomic_set - set atomic variable
+@@ -39,47 +63,77 @@
+ *
+ * Atomically sets the value of @v to @i.
+ */
+-#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
++static inline void atomic_set(atomic_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static __inline__ void atomic_##op(int i, atomic_t * v) \
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " b 4f \n" \
++ " .set noreorder \n" \
++ "3: b 5f \n" \
++ " move %0, %1 \n" \
++ " .set reorder \n"
++#define __OVERFLOW_EXTABLE \
++ "3:\n" \
++ _ASM_EXTABLE(2b, 3b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, extable) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %0, %1 # atomic_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set mips3 \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %0, %1 # atomic_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " sc %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " sc %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+r" (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline int atomic_##op##_return##suffix##_relaxed(int i, atomic##suffix##_t * v) \
+ { \
+ int result; \
+ \
+@@ -87,12 +141,15 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ " .set mips3 \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -100,32 +157,40 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " sc %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!result)); \
+- \
+- result = temp; result c_op i; \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " sc %0, %2 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " lw %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sw %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ return result; \
+ }
+
++#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ { \
+@@ -173,13 +238,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC_OPS(op, c_op, asm_op) \
+- ATOMIC_OP(op, c_op, asm_op) \
+- ATOMIC_OP_RETURN(op, c_op, asm_op) \
+- ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op) \
++ ATOMIC_OP(op, asm_op) \
++ ATOMIC_OP_RETURN(op, asm_op) \
++ ATOMIC_FETCH_OP(op, asm_op)
+
+-ATOMIC_OPS(add, +=, addu)
+-ATOMIC_OPS(sub, -=, subu)
++ATOMIC_OPS(add, add)
++ATOMIC_OPS(sub, sub)
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+@@ -187,13 +252,13 @@ ATOMIC_OPS(sub, -=, subu)
+ #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+ #undef ATOMIC_OPS
+-#define ATOMIC_OPS(op, c_op, asm_op) \
+- ATOMIC_OP(op, c_op, asm_op) \
+- ATOMIC_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC_OPS(op, asm_op) \
++ ATOMIC_OP(op, asm_op) \
++ ATOMIC_FETCH_OP(op, asm_op)
+
+-ATOMIC_OPS(and, &=, and)
+-ATOMIC_OPS(or, |=, or)
+-ATOMIC_OPS(xor, ^=, xor)
++ATOMIC_OPS(and, and)
++ATOMIC_OPS(or, or)
++ATOMIC_OPS(xor, xor)
+
+ #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+ #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+@@ -202,7 +267,9 @@ ATOMIC_OPS(xor, ^=, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ /*
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+@@ -212,7 +279,7 @@ ATOMIC_OPS(xor, ^=, xor)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+ int result;
+
+@@ -222,7 +289,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ int temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
+ " subu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -271,8 +338,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ return result;
+ }
+
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++ int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+@@ -300,6 +385,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_sub_and_test - subtract value from variable and test result
+@@ -321,6 +410,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+
+ /*
+ * atomic_dec_and_test - decrement by 1 and test
+@@ -345,6 +438,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+
+ /*
+ * atomic_dec - decrement and test
+@@ -353,6 +450,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic_dec(v) atomic_sub(1, (v))
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ /*
+ * atomic_add_negative - add and test if negative
+@@ -374,54 +475,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * @v: pointer of type atomic64_t
+ *
+ */
+-#define atomic64_read(v) READ_ONCE((v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++ return READ_ONCE(v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ /*
+ * atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+-#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC64_OP(op, c_op, asm_op) \
+-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
++
++#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
++static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: lld %0, %1 # atomic64_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %0, %1 # atomic64_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " scd %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " scd %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline long atomic64_##op##_return##suffix##_relaxed(long i, atomic64##suffix##_t * v)\
+ { \
+ long result; \
+ \
+@@ -429,12 +553,15 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
++ " .set mips3 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -442,33 +569,42 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " scd %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "=" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+- : "memory"); \
+- } while (unlikely(!result)); \
+- \
+- result = temp; result c_op i; \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " scd %0, %2 \n" \
++ " beqz %0, 1b \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "=" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
++ : "memory"); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " ld %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sd %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+ return result; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ { \
+@@ -517,13 +653,13 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC64_OPS(op, c_op, asm_op) \
+- ATOMIC64_OP(op, c_op, asm_op) \
+- ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+- ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op) \
++ ATOMIC64_OP(op, asm_op) \
++ ATOMIC64_OP_RETURN(op, asm_op) \
++ ATOMIC64_FETCH_OP(op, asm_op)
+
+-ATOMIC64_OPS(add, +=, daddu)
+-ATOMIC64_OPS(sub, -=, dsubu)
++ATOMIC64_OPS(add, dadd)
++ATOMIC64_OPS(sub, dsub)
+
+ #define atomic64_add_return_relaxed atomic64_add_return_relaxed
+ #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+@@ -531,13 +667,13 @@ ATOMIC64_OPS(sub, -=, dsubu)
+ #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+ #undef ATOMIC64_OPS
+-#define ATOMIC64_OPS(op, c_op, asm_op) \
+- ATOMIC64_OP(op, c_op, asm_op) \
+- ATOMIC64_FETCH_OP(op, c_op, asm_op)
++#define ATOMIC64_OPS(op, asm_op) \
++ ATOMIC64_OP(op, asm_op) \
++ ATOMIC64_FETCH_OP(op, asm_op)
+
+-ATOMIC64_OPS(and, &=, and)
+-ATOMIC64_OPS(or, |=, or)
+-ATOMIC64_OPS(xor, ^=, xor)
++ATOMIC64_OPS(and, and)
++ATOMIC64_OPS(or, or)
++ATOMIC64_OPS(xor, xor)
+
+ #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+ #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+@@ -546,7 +682,11 @@ ATOMIC64_OPS(xor, ^=, xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
+
+ /*
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+@@ -557,7 +697,7 @@ ATOMIC64_OPS(xor, ^=, xor)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+ long result;
+
+@@ -567,7 +707,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ long temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -616,9 +756,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ return result;
+ }
+
+-#define atomic64_cmpxchg(v, o, n) \
+- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+@@ -648,6 +805,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+
+ /*
+ * atomic64_sub_and_test - subtract value from variable and test result
+@@ -669,6 +827,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * other cases.
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic64_dec_and_test - decrement by 1 and test
+@@ -693,6 +852,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+
+ /*
+ * atomic64_dec - decrement and test
+@@ -701,6 +861,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
+
+ /*
+ * atomic64_add_negative - add and test if negative
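
This is the core of PAX_REFCOUNT on MIPS: the checked atomics are generated from the signed add/sub (and dadd/dsub) mnemonics, which raise an integer overflow exception, while the `_unchecked` variants receive the u-suffixed, silently wrapping forms via `asm_op##u`. __OVERFLOW_EXTABLE registers the trapping instruction in the exception table so do_ov(), patched in the traps.c hunk at the end of this section, can report the overflow and resume at a fixup label that leaves the counter unmodified. A simplified expansion of the checked atomic_add() (constraints abridged):

	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		int temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	ll	%0, %1	# load-linked		\n"
		"2:	add	%0, %2	# signed: traps on overflow\n"
		"	sc	%0, %1	# store-conditional	\n"
		"	beqz	%0, 1b				\n"
		"3:						\n" /* do_ov() fixup target */
		_ASM_EXTABLE(2b, 3b)
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	}

On a trap at 2:, fixup_exception() redirects the program counter to 3:, so the store-conditional never executes and the counter keeps its pre-overflow value.
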
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 2b3dc29..1f7bdc4 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -458,6 +458,13 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+ #define ARCH_DLINFO \
+ do { \
+diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
+index c1f6afa..38cc6e9 100644
+--- a/arch/mips/include/asm/exec.h
++++ b/arch/mips/include/asm/exec.h
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_EXEC_H */
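
Turning arch_align_stack() into a pure alignment macro pairs with the process.c hunk later in this section, which deletes the randomized C implementation: under PaX the stack base is randomized by RANDUSTACK with a defined entropy budget, so the ad-hoc per-exec jitter is dropped. Before and after:

	/* upstream (removed from arch/mips/kernel/process.c below): */
	unsigned long arch_align_stack(unsigned long sp)
	{
		if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
			sp -= get_random_int() & ~PAGE_MASK;
		return sp & ALMASK;
	}

	/* hardened replacement: deterministic 16-byte alignment only */
	#define arch_align_stack(x) ((x) & ~0xfUL)
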
+diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
+index 9e8ef59..1139d6b 100644
+--- a/arch/mips/include/asm/hw_irq.h
++++ b/arch/mips/include/asm/hw_irq.h
+@@ -10,7 +10,7 @@
+
+ #include <linux/atomic.h>
+
+-extern atomic_t irq_err_count;
++extern atomic_unchecked_t irq_err_count;
+
+ /*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
+index 956db6e..05a58b1 100644
+--- a/arch/mips/include/asm/irq.h
++++ b/arch/mips/include/asm/irq.h
+@@ -11,7 +11,6 @@
+
+ #include <linux/linkage.h>
+ #include <linux/smp.h>
+-#include <linux/irqdomain.h>
+
+ #include <asm/mipsmtregs.h>
+
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index 8feaed6..1bd8a64 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -13,15 +13,25 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i, l) atomic_long_add((i), (&(l)->a))
++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
+ #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ /*
+ * Same as above, but return the result value
+@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
+ return result;
+ }
+
++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
++{
++ unsigned long result;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqzl %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqz %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ result = l->a.counter;
++ result += i;
++ l->a.counter = result;
++ local_irq_restore(flags);
++ }
++
++ return result;
++}
++
+ static __inline__ long local_sub_return(long i, local_t * l)
+ {
+ unsigned long result;
+@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
+
+ #define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+ /**
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 5f98759..a3a7cb2 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index a03e869..e1928f5 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 9e9e94415..43354f5 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -20,6 +20,9 @@
+ #include <asm/io.h>
+ #include <asm/pgtable-bits.h>
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ struct mm_struct;
+ struct vm_area_struct;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index e309d8f..20eefec 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -101,6 +101,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+ #define TIF_UPROBE 6 /* breakpointed or singlestepping */
+ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
++
+ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_NOHZ 19 /* in adaptive nohz mode */
+@@ -137,14 +140,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_USEDMSA (1<<TIF_USEDMSA)
+ #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
++ _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK \
+@@ -153,7 +158,7 @@ static inline struct thread_info *current_thread_info(void)
+ /* work to do on any return to u-space */
+ #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
+ _TIF_WORK_SYSCALL_EXIT | \
+- _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /*
+ * We stash processor id into a COP0 register to retrieve it fast
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index 89fa5c0b..9409cea 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -148,6 +148,7 @@ static inline bool eva_kernel_access(void)
+ __ok == 0; \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ likely(__access_ok((addr), (size), __access_mask))
+
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 9c7f3e1..ed42bda 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -37,6 +37,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/elfcore.h>
+ #include <linux/compat.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 1ab3432..0e66879 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -41,6 +41,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
+index 44a1f79..2bd6aa3 100644
+--- a/arch/mips/kernel/irq-gt641xx.c
++++ b/arch/mips/kernel/irq-gt641xx.c
+@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
+ }
+ }
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init gt641xx_irq_init(void)
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index 2b0a371..1e272b7 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -36,17 +36,17 @@ void ack_bad_irq(unsigned int irq)
+ printk("unexpected IRQ # %d\n", irq);
+ }
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ return 0;
+ }
+
+ asmlinkage void spurious_interrupt(void)
+ {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init init_IRQ(void)
+@@ -72,6 +72,8 @@ void __init init_IRQ(void)
+ }
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
++
++extern void gr_handle_kernel_exploit(void);
+ static inline void check_stack_overflow(void)
+ {
+ unsigned long sp;
+@@ -87,6 +89,7 @@ static inline void check_stack_overflow(void)
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ }
+ #else
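
irq_err_count here, and the sync-r4k handshake counters below, become atomic_unchecked_t for the reason sketched earlier: they are bookkeeping values that may legitimately wrap, and leaving them checked would let sheer uptime trip the REFCOUNT overflow handler. The same file also wires gr_handle_kernel_exploit() into the stack-overflow check, so a detected kernel stack overflow triggers grsecurity's exploit response rather than only a printk. The counter conversion is mechanical:

	static atomic_unchecked_t irq_err_count;	/* wrap-around is harmless */

	asmlinkage void spurious_interrupt(void)
	{
		atomic_inc_unchecked(&irq_err_count);	/* addu path: never traps */
	}
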
+diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
+index f5c8bce..b06a560 100644
+--- a/arch/mips/kernel/kprobes.c
++++ b/arch/mips/kernel/kprobes.c
+@@ -535,6 +535,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -543,6 +544,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
+@@ -611,6 +613,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -618,6 +621,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index 7cf653e..7df52f6 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -168,7 +168,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+- ACCESS_ONCE(*nc_core_ready_count) = 0;
++ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index fbbf5fc..eec61dd 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -603,18 +603,6 @@ unsigned long get_wchan(struct task_struct *task)
+ return pc;
+ }
+
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+-
+ static void arch_dump_stack(void *info)
+ {
+ struct pt_regs *regs;
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index bf83dc1..775bed8 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -883,6 +883,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+@@ -900,6 +904,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ if (secure_computing(NULL) == -1)
+ return -1;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
+index 4472a7f..c5905e6 100644
+--- a/arch/mips/kernel/sync-r4k.c
++++ b/arch/mips/kernel/sync-r4k.c
+@@ -18,8 +18,8 @@
+ #include <asm/mipsregs.h>
+
+ static unsigned int initcount = 0;
+-static atomic_t count_count_start = ATOMIC_INIT(0);
+-static atomic_t count_count_stop = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
+
+ #define COUNTON 100
+ #define NR_LOOPS 3
+@@ -46,13 +46,13 @@ void synchronise_count_master(int cpu)
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+- while (atomic_read(&count_count_start) != 1)
++ while (atomic_read_unchecked(&count_count_start) != 1)
+ mb();
+- atomic_set(&count_count_stop, 0);
++ atomic_set_unchecked(&count_count_stop, 0);
+ smp_wmb();
+
+ /* Let the slave writes its count register */
+- atomic_inc(&count_count_start);
++ atomic_inc_unchecked(&count_count_start);
+
+ /* Count will be initialised to current timer */
+ if (i == 1)
+@@ -67,11 +67,11 @@ void synchronise_count_master(int cpu)
+ /*
+ * Wait for slave to leave the synchronization point:
+ */
+- while (atomic_read(&count_count_stop) != 1)
++ while (atomic_read_unchecked(&count_count_stop) != 1)
+ mb();
+- atomic_set(&count_count_start, 0);
++ atomic_set_unchecked(&count_count_start, 0);
+ smp_wmb();
+- atomic_inc(&count_count_stop);
++ atomic_inc_unchecked(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+@@ -96,8 +96,8 @@ void synchronise_count_slave(int cpu)
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+- atomic_inc(&count_count_start);
+- while (atomic_read(&count_count_start) != 2)
++ atomic_inc_unchecked(&count_count_start);
++ while (atomic_read_unchecked(&count_count_start) != 2)
+ mb();
+
+ /*
+@@ -106,8 +106,8 @@ void synchronise_count_slave(int cpu)
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+- atomic_inc(&count_count_stop);
+- while (atomic_read(&count_count_stop) != 2)
++ atomic_inc_unchecked(&count_count_stop);
++ while (atomic_read_unchecked(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index ec87ef9..dac8bcd 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -702,7 +702,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+ };
+
+ prev_state = exception_enter();
+- die_if_kernel("Integer overflow", regs);
++ if (unlikely(!user_mode(regs))) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fixup_exception(regs)) {
++ pax_report_refcount_error(regs, NULL);
++ exception_exit(prev_state);
++ return;
++ }
++#endif
++
++ die("Integer overflow", regs);
++ }
+
+ force_sig_info(SIGFPE, &info, current);
+ exception_exit(prev_state);
+diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
+index c3e2205..b4302f8 100644
+--- a/arch/mips/lib/ashldi3.c
++++ b/arch/mips/lib/ashldi3.c
+@@ -2,7 +2,11 @@
+
+ #include "libgcc.h"
+
+-long long notrace __ashldi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashlti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashldi3(DWtype u, word_type b)
++#endif
+ {
+ DWunion uu, w;
+ word_type bm;
+@@ -11,19 +15,22 @@ long long notrace __ashldi3(long long u, word_type b)
+ return u;
+
+ uu.ll = u;
+- bm = 32 - b;
++ bm = BITS_PER_LONG - b;
+
+ if (bm <= 0) {
+ w.s.low = 0;
+- w.s.high = (unsigned int) uu.s.low << -bm;
++ w.s.high = (unsigned long) uu.s.low << -bm;
+ } else {
+- const unsigned int carries = (unsigned int) uu.s.low >> bm;
++ const unsigned long carries = (unsigned long) uu.s.low >> bm;
+
+- w.s.low = (unsigned int) uu.s.low << b;
+- w.s.high = ((unsigned int) uu.s.high << b) | carries;
++ w.s.low = (unsigned long) uu.s.low << b;
++ w.s.high = ((unsigned long) uu.s.high << b) | carries;
+ }
+
+ return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashlti3);
++#else
+ EXPORT_SYMBOL(__ashldi3);
++#endif
+diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
+index 1745602..d20aabf 100644
+--- a/arch/mips/lib/ashrdi3.c
++++ b/arch/mips/lib/ashrdi3.c
+@@ -2,7 +2,11 @@
+
+ #include "libgcc.h"
+
+-long long notrace __ashrdi3(long long u, word_type b)
++#ifdef CONFIG_64BIT
++DWtype notrace __ashrti3(DWtype u, word_type b)
++#else
++DWtype notrace __ashrdi3(DWtype u, word_type b)
++#endif
+ {
+ DWunion uu, w;
+ word_type bm;
+@@ -11,21 +15,24 @@ long long notrace __ashrdi3(long long u, word_type b)
+ return u;
+
+ uu.ll = u;
+- bm = 32 - b;
++ bm = BITS_PER_LONG - b;
+
+ if (bm <= 0) {
+ /* w.s.high = 1..1 or 0..0 */
+ w.s.high =
+- uu.s.high >> 31;
++ uu.s.high >> (BITS_PER_LONG - 1);
+ w.s.low = uu.s.high >> -bm;
+ } else {
+- const unsigned int carries = (unsigned int) uu.s.high << bm;
++ const unsigned long carries = (unsigned long) uu.s.high << bm;
+
+ w.s.high = uu.s.high >> b;
+- w.s.low = ((unsigned int) uu.s.low >> b) | carries;
++ w.s.low = ((unsigned long) uu.s.low >> b) | carries;
+ }
+
+ return w.ll;
+ }
+-
++#ifdef CONFIG_64BIT
++EXPORT_SYMBOL(__ashrti3);
++#else
+ EXPORT_SYMBOL(__ashrdi3);
++#endif
+diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
+index 05909d58..b03284b 100644
+--- a/arch/mips/lib/libgcc.h
++++ b/arch/mips/lib/libgcc.h
+@@ -5,13 +5,19 @@
+
+ typedef int word_type __attribute__ ((mode (__word__)));
+
++#ifdef CONFIG_64BIT
++typedef int DWtype __attribute__((mode(TI)));
++#else
++typedef long long DWtype;
++#endif
++
+ #ifdef __BIG_ENDIAN
+ struct DWstruct {
+- int high, low;
++ long high, low;
+ };
+ #elif defined(__LITTLE_ENDIAN)
+ struct DWstruct {
+- int low, high;
++ long low, high;
+ };
+ #else
+ #error I feel sick.
+@@ -19,7 +25,7 @@ struct DWstruct {
+
+ typedef union {
+ struct DWstruct s;
+- long long ll;
++ DWtype ll;
+ } DWunion;
+
+ #endif /* __ASM_LIBGCC_H */
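These three hunks widen the software shift helpers from the 64-bit __ashldi3/__ashrdi3 to the 128-bit __ashlti3/__ashrti3 on 64-bit kernels by retyping the DWstruct halves as long. The left-shift logic in isolation, as a standalone sketch over two explicit 64-bit halves (little-endian field order, matching the patch's DWstruct):

	#include <stdint.h>

	struct dw { uint64_t low, high; };	/* mirrors DWstruct, little-endian */

	static struct dw ashl128(struct dw u, int b)
	{
		struct dw w;
		int bm;

		if (b == 0)
			return u;
		bm = 64 - b;			/* BITS_PER_LONG - b in the patch */
		if (bm <= 0) {			/* shifting a whole word or more */
			w.low  = 0;
			w.high = u.low << -bm;	/* -bm == b - 64 */
		} else {
			w.low  = u.low << b;
			w.high = (u.high << b) | (u.low >> bm);	/* carry bits */
		}
		return w;
	}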
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 3bef306..fcec133 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -30,6 +30,23 @@
+
+ int show_unhandled_signals = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -204,6 +221,14 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
++ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+ if (show_unhandled_signals &&
+diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
+index e86ebcf..7a78a07 100644
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -474,10 +474,10 @@ void __init mem_init(void)
+
+ #ifdef CONFIG_64BIT
+ if ((unsigned long) &_text > (unsigned long) CKSEG0)
+- /* The -4 is a hack so that user tools don't have to handle
++ /* The -0x2000-4 is a hack so that user tools don't have to handle
+ the overflow. */
+ kclist_add(&kcore_kseg0, (void *) CKSEG0,
+- 0x80000000 - 4, KCORE_TEXT);
++ 0x80000000 - 0x2000 - 4, KCORE_TEXT);
+ #endif
+ }
+ #endif /* !CONFIG_NEED_MULTIPLE_NODES */
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index d08ea3f..66bb13d 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+@@ -160,14 +166,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
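The mmap hunks above thread a per-mapping random offset from gr_rand_threadstack_offset() into the unmapped-area search and replace the plain overlap test with check_heap_stack_gap(). Neither helper's body appears in these hunks; as a rough, hypothetical reduction of the substituted test (the real helper also honors a configurable stack gap):

	/* Hypothetical simplification of check_heap_stack_gap(): a candidate
	 * mapping must fit below the next VMA with `offset` bytes of slack. */
	static int gap_ok(struct vm_area_struct *vma, unsigned long addr,
			  unsigned long len, unsigned long offset)
	{
		if (!vma)
			return 1;		/* nothing above us */
		return addr + len + offset <= vma->vm_start;
	}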
+diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
+index cfceaea..65deeb4 100644
+--- a/arch/mips/sgi-ip27/ip27-nmi.c
++++ b/arch/mips/sgi-ip27/ip27-nmi.c
+@@ -187,9 +187,9 @@ void
+ cont_nmi_dump(void)
+ {
+ #ifndef REAL_NMI_SIGNAL
+- static atomic_t nmied_cpus = ATOMIC_INIT(0);
++ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
+
+- atomic_inc(&nmied_cpus);
++ atomic_inc_unchecked(&nmied_cpus);
+ #endif
+ /*
+ * Only allow 1 cpu to proceed
+@@ -233,7 +233,7 @@ cont_nmi_dump(void)
+ udelay(10000);
+ }
+ #else
+- while (atomic_read(&nmied_cpus) != num_online_cpus());
++ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
+ #endif
+
+ /*
+diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
+index 160b880..3b53fdc 100644
+--- a/arch/mips/sni/rm200.c
++++ b/arch/mips/sni/rm200.c
+@@ -270,7 +270,7 @@ void sni_rm200_mask_and_ack_8259A(struct irq_data *d)
+ "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 41e873b..34d33a7 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -1;
+ }
+diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
+index ae0e4ee..e8f0692 100644
+--- a/arch/mips/vr41xx/common/irq.c
++++ b/arch/mips/vr41xx/common/irq.c
+@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
+ irq_cascade_t *cascade;
+
+ if (irq >= NR_IRQS) {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ return;
+ }
+
+@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
+ ret = cascade->get_irq(irq);
+ irq = ret;
+ if (ret < 0)
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ else
+ irq_dispatch(irq);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * L1 cache
+ */
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+-#define L1_CACHE_BYTES 32 /* bytes per entry */
+ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 5f55da9..7ce9437 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,13 +19,15 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
+
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+ #define __ro_after_init __read_mostly
+
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_OPENRISC_CACHE_H */
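A recurring change across these cache headers: L1_CACHE_BYTES is now derived from L1_CACHE_SHIFT via _AC(1,UL) rather than hard-coded. _AC keeps the UL suffix in C but drops it under __ASSEMBLY__, so the same header serves both contexts, and the constant is long-sized, which matters once it feeds address masks. Roughly how the macro behaves (paraphrasing include/uapi/linux/const.h):

	#ifdef __ASSEMBLY__
	#define _AC(X, Y)	X		/* assembler: bare constant */
	#else
	#define __AC(X, Y)	(X##Y)
	#define _AC(X, Y)	__AC(X, Y)	/* C: pasted suffix, e.g. 1UL */
	#endif

	#define L1_CACHE_SHIFT	4
	#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 16UL in C, 16 in asm */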
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 5394b9c..e77a306 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -327,6 +327,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ return dec;
+ }
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+
+
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index df0f52b..810699b 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64 and 128-byte L2 cachelines; PA 1.1 processors
+@@ -14,6 +15,8 @@
+ #define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 78c9fd3..42fa66a 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index f08dda3..ea6aa1b 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
+@@ -96,6 +101,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 3a4ed9f..29b7218 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -236,6 +236,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0497cec..58b097a 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -256,10 +256,10 @@ static inline void copy_user_overflow(int size, unsigned long count)
+ static __always_inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+ unsigned long ret = n;
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(to, n, false);
+ ret = __copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+@@ -267,7 +267,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ else
+ __bad_copy_user();
+
+- if (unlikely(ret))
++ if (unlikely(ret && (long)ret > 0))
+ memset(to + (n - ret), 0, ret);
+
+ return ret;
+@@ -276,9 +276,9 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ static __always_inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- int sz = __compiletime_object_size(from);
++	size_t sz = __compiletime_object_size(from);
+
+- if (likely(sz < 0 || sz >= n)) {
++ if (likely(sz == (size_t)-1 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = __copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index a0ecdb4a..71d2069 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -100,14 +100,12 @@
+ * or init pieces the location is */
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->init_layout.base &&
+- loc <= (me->init_layout.base + me->init_layout.size));
++	return within_module_init((unsigned long)loc, me);
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->core_layout.base &&
+- loc <= (me->core_layout.base + me->core_layout.size));
++	return within_module_core((unsigned long)loc, me);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -367,13 +365,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_layout.size = ALIGN(me->core_layout.size, 16);
+- me->arch.got_offset = me->core_layout.size;
+- me->core_layout.size += gots * sizeof(struct got_entry);
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++ me->arch.got_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += gots * sizeof(struct got_entry);
+
+- me->core_layout.size = ALIGN(me->core_layout.size, 16);
+- me->arch.fdesc_offset = me->core_layout.size;
+- me->core_layout.size += fdescs * sizeof(Elf_Fdesc);
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 16);
++ me->arch.fdesc_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -391,7 +389,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->core_layout.base + me->arch.got_offset;
++ got = me->core_layout.base_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -409,7 +407,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->core_layout.base_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -427,7 +425,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -847,7 +845,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
++ gp = (Elf_Addr)me->core_layout.base_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 0a393a0..5b3199e0 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > task_size)
+ return -ENOMEM;
+@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ goto found_addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = mmap_upper_limit();
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ found_addr:
+@@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long addr = addr0;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ #ifdef CONFIG_64BIT
+ /* This should only ever run for 32-bit processes. */
+@@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ }
+
+ /* requesting a specific address */
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto found_addr;
+@@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_upper_limit();
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP) {
++ mm->mmap_legacy_base += mm->delta_mmap;
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ }
++#endif
++
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 378df92..9b2ab51 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -719,9 +719,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 040c48f..d133dce 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -16,6 +16,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/extable.h>
+ #include <linux/uaccess.h>
++#include <linux/unistd.h>
+
+ #include <asm/traps.h>
+
+@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -298,8 +409,33 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
+
+ good_area:
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
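The EMUPLT block above recognizes an unpatched PLT stub by fetching the instruction words around the faulting PC with get_user() and comparing them against fixed parisc opcodes before synthesizing the resolver call. The recognition step in isolation (opcode constants copied from the hunk, surrounding logic simplified):

	/* Sketch of the recognition step only: fetch two words at the fault
	 * address and match them against the expected stub instructions. */
	static int looks_like_unpatched_plt(struct pt_regs *regs)
	{
		unsigned int bl, depwi;
		unsigned long pc = instruction_pointer(regs);

		if (get_user(bl, (unsigned int __user *)pc) ||
		    get_user(depwi, (unsigned int __user *)(pc + 4)))
			return 0;	/* unreadable: not a stub */

		return bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU;
	}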
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 65fba4c..3cfec12 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -140,6 +140,7 @@ config PPC
+ select ARCH_USE_BUILTIN_BSWAP
+ select OLD_SIGSUSPEND
+ select OLD_SIGACTION if PPC32
++ select HAVE_GCC_PLUGINS
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+@@ -441,6 +442,7 @@ config KEXEC
+ bool "kexec system call"
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 2b90335..5e1a3d6 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -12,6 +12,11 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++ PPC_LONG" " #from ", " #to"\n" \
++" .previous\n"
++
+ /*
+ * Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
+ * a "bne-" instruction at the end, so an isync is enough as a acquire barrier
+@@ -39,38 +44,79 @@ static __inline__ int atomic_read(const atomic_t *v)
+ return t;
+ }
+
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC_OP(op, asm_op) \
+-static __inline__ void atomic_##op(int a, atomic_t *v) \
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##o.
++#define __OVERFLOW_PRE \
++ " mcrxr cr0\n"
++#define __OVERFLOW_POST \
++ " bf 4*cr0+so, 3f\n" \
++ "2: .long 0x00c00b00\n" \
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "\n4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_PRE
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
++"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ PPC405_ERR77(0,%3) \
+ " stwcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
+-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
++ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline int atomic_##op##_return##suffix##_relaxed(int a, atomic##suffix##_t *v)\
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+-"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
++"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "_relaxed\n"\
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ PPC405_ERR77(0, %3) \
+ " stwcx. %0,0,%3\n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+@@ -78,6 +124,9 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+ return t; \
+ }
+
++#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
+ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+ { \
+@@ -105,6 +154,7 @@ ATOMIC_OPS(add, add)
+ ATOMIC_OPS(sub, subf)
+
+ #define atomic_add_return_relaxed atomic_add_return_relaxed
++#define atomic_add_return_unchecked_relaxed atomic_add_return_unchecked_relaxed
+ #define atomic_sub_return_relaxed atomic_sub_return_relaxed
+
+ #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+@@ -126,41 +176,22 @@ ATOMIC_OPS(xor, xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP_RELAXED
+ #undef ATOMIC_OP_RETURN_RELAXED
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic_inc(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%2)
+-" stwcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
+-" addic %0,%0,1\n"
+- PPC405_ERR77(0, %2)
+-" stwcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
+-}
++/*
++ * atomic_inc - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
++#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
++#define atomic_inc_return_unchecked_relaxed(v) atomic_add_return_unchecked_relaxed(1, (v))
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -171,37 +202,20 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+-
+-static __inline__ void atomic_dec(atomic_t *v)
+-{
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%2)\
+-" stwcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
++#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
++
++/*
++ * atomic_dec - decrement atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
+-" addic %0,%0,-1\n"
+- PPC405_ERR77(0, %2)
+-" stwcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ atomic_sub_unchecked(1, v);
+ }
+
+ #define atomic_inc_return_relaxed atomic_inc_return_relaxed
+@@ -216,6 +230,16 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+@@ -233,14 +257,21 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%1 # __atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+- beq 2f \n\
+- add %0,%2,%0 \n"
++ beq 5f \n"
++
++ __OVERFLOW_PRE
++ __REFCOUNT_OP(add) " %0,%2,%0 \n"
++ __OVERFLOW_POST
++
+ PPC405_ERR77(0,%2)
+ " stwcx. %0,0,%1 \n\
+ bne- 1b \n"
++
++ __OVERFLOW_EXTABLE
++
+ PPC_ATOMIC_EXIT_BARRIER
+ " subf %0,%2,%0 \n\
+-2:"
++5:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
+@@ -323,37 +354,59 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+ return t;
+ }
+
++static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic64_set(atomic64_t *v, long i)
+ {
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC64_OP(op, asm_op) \
+-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
++static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ " stdcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ }
+
+-#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
+-static inline long \
+-atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
++ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline long atomic64_##op##_return##suffix##_relaxed(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ "1: ldarx %0,0,%3 # atomic64_" #op "_return_relaxed\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ " stdcx. %0,0,%3\n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+@@ -361,6 +414,9 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v) \
+ return t; \
+ }
+
++#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
+ static inline long \
+ atomic64_fetch_##op##_relaxed(long a, atomic64_t *v) \
+@@ -409,38 +465,33 @@ ATOMIC64_OPS(xor, xor)
+ #undef ATOPIC64_OPS
+ #undef ATOMIC64_FETCH_OP_RELAXED
+ #undef ATOMIC64_OP_RETURN_RELAXED
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
++#undef __OVERFLOW_PRE
++#undef __REFCOUNT_OP
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic64_inc(atomic64_t *v)
+-{
+- long t;
++/*
++ * atomic64_inc - increment atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
+ }
+
+-static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
++static inline long atomic64_inc_return_unchecked_relaxed(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc_return_relaxed\n"
+-" addic %0,%0,1\n"
+-" stdcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ return atomic64_add_return_unchecked_relaxed(1, v);
+ }
+
+ /*
+@@ -453,34 +504,18 @@ static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+-static __inline__ void atomic64_dec(atomic64_t *v)
++/*
++ * atomic64_dec - decrement atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
++
++static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
+-{
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec_return_relaxed\n"
+-" addic %0,%0,-1\n"
+-" stdcx. %0,0,%2\n"
+-" bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-
+- return t;
++ atomic64_sub_unchecked(1, v);
+ }
+
+ #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+@@ -522,6 +557,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+ #define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+@@ -537,15 +582,22 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
++"1: ldarx %0,0,%1 # atomic64_add_unless\n\
+ cmpd 0,%0,%3 \n\
+- beq 2f \n\
+- add %0,%2,%0 \n"
++ beq 5f \n"
++
++ __OVERFLOW_PRE
++ __REFCOUNT_OP(add) " %0,%2,%0 \n"
++ __OVERFLOW_POST
++
+ " stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ PPC_ATOMIC_EXIT_BARRIER
++
++ __OVERFLOW_EXTABLE
++
+ " subf %0,%2,%0 \n\
+-2:"
++5:"
+ : "=&r" (t)
+ : "r" (&v->counter), "r" (a), "r" (u)
+ : "cc", "memory");
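Stripped of the lwarx/stwcx. retry loop, the __OVERFLOW_PRE/__OVERFLOW_POST machinery above does the following: reset the XER overflow bits (mcrxr), perform the arithmetic with the overflow-recording instruction form (addo.), and, if the summary-overflow bit is set, execute the trap word 0x00c00b00, whose exception-table fixup routes into pax_report_refcount_error() instead of letting the counter wrap. A non-atomic C analogue of the checked add, offered as a sketch only:

	/* Sketch: detect signed overflow and trap rather than wrap.
	 * The kernel variant does this inside the lwarx/stwcx. loop. */
	static inline void checked_add(int a, int *counter)
	{
		int sum;

		if (__builtin_add_overflow(*counter, a, &sum))
			__builtin_trap();	/* patch: trap word + extable fixup */
		*counter = sum;
	}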
+diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
+index 880db13..bb4ed4a 100644
+--- a/arch/powerpc/include/asm/book3s/32/hash.h
++++ b/arch/powerpc/include/asm/book3s/32/hash.h
+@@ -20,6 +20,7 @@
+ #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_NX _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
+index 6b8b2d5..cf17a29 100644
+--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
+@@ -227,7 +227,7 @@ static inline void __ptep_set_access_flags(struct mm_struct *mm,
+ pte_t *ptep, pte_t entry)
+ {
+ unsigned long set = pte_val(entry) &
+- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
++ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | _PAGE_NX);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+ pte_update(ptep, clr, set);
+diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+index cd5e7aa..7709061 100644
+--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
+@@ -91,6 +91,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ pgd_populate(mm, pgd, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
+@@ -106,6 +111,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++	pud_populate(mm, pud, pmd);
++}
++
+ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long address)
+ {
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index ffbafbf..71d037fb 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,8 @@
+
+ #ifdef __KERNEL__
+
++#include <asm/reg.h>
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -22,7 +24,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffe..b36c98c 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -30,6 +30,18 @@
+
+ #define ELF_ET_DYN_BASE 0x20000000
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
++
+ #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
+ /*
+diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
+index 8196e9c..d83a9f3 100644
+--- a/arch/powerpc/include/asm/exec.h
++++ b/arch/powerpc/include/asm/exec.h
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 5acabbd..7ea14fa 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -10,7 +10,7 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_KMAP_TYPES_H */
+diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
+index b8da913..c02b593 100644
+--- a/arch/powerpc/include/asm/local.h
++++ b/arch/powerpc/include/asm/local.h
+@@ -9,21 +9,65 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct
++{
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i,l) atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ static __inline__ long local_add_return(long a, local_t *l)
+ {
+ long t;
+
+ __asm__ __volatile__(
++"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++ PPC_STLCX "%0,0,%2 \n\
++ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&(l->a.counter))
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
++{
++ long t;
++
++ __asm__ __volatile__(
+ "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
+
+ #define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+ /**
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index 30922f6..0bb237c 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -26,7 +26,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
+ }
+ #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+index 897d2e1..399f34f 100644
+--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
++++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
+@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+ {
+@@ -139,6 +145,7 @@ extern void __tlb_remove_table(void *_table);
+ #endif
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 56398e7..287a772 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -230,8 +230,9 @@ extern long long virt_phys_offset;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -259,6 +260,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef CONFIG_PPC_BOOK3S_64
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index dd5f071..0470718 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -169,15 +169,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 9bd87f2..f600e6d 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_POWERPC_PGTABLE_H
+ #define _ASM_POWERPC_PGTABLE_H
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/mmzone.h>
+diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
+index 4ba26dd..2d1137d 100644
+--- a/arch/powerpc/include/asm/pte-common.h
++++ b/arch/powerpc/include/asm/pte-common.h
+@@ -16,6 +16,9 @@
+ #ifndef _PAGE_EXEC
+ #define _PAGE_EXEC 0
+ #endif
++#ifndef _PAGE_NX
++#define _PAGE_NX 0
++#endif
+ #ifndef _PAGE_ENDIAN
+ #define _PAGE_ENDIAN 0
+ #endif
+@@ -53,13 +56,13 @@
+ #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
+ #endif
+ #ifndef _PAGE_KERNEL_RO
+-#define _PAGE_KERNEL_RO (_PAGE_RO)
++#define _PAGE_KERNEL_RO (_PAGE_RO | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_ROX
+ #define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_RO)
+ #endif
+ #ifndef _PAGE_KERNEL_RW
+-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
++#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_NX)
+ #endif
+ #ifndef _PAGE_KERNEL_RWX
+ #define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+@@ -142,15 +145,12 @@ static inline bool pte_user(pte_t pte)
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+- _PAGE_EXEC)
+-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+- _PAGE_EXEC)
+-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
+- _PAGE_EXEC)
++#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_NX)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
++#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_NX)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | _PAGE_EXEC)
+
+ #define __P000 PAGE_NONE
+ #define __P001 PAGE_READONLY
+@@ -171,11 +171,9 @@ static inline bool pte_user(pte_t pte)
+ #define __S111 PAGE_SHARED_X
+
+ /* Permission masks used for kernel mappings */
+-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+-#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+- _PAGE_NO_CACHE)
+-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+- _PAGE_NO_CACHE | _PAGE_GUARDED)
++#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_NX)
++#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
++#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
+ #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+ #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 13f5fad..6ec27c3 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -270,6 +270,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 0d02c11..33a8f08 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -51,7 +51,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index fa37fe9..867d3cf 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -27,6 +27,7 @@
+ #include <asm/asm-compat.h>
+ #include <asm/synch.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/atomic.h>
+
+ #ifdef CONFIG_PPC64
+ /* use 0x800000yy when locked, where yy == CPU number */
+@@ -228,13 +229,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "1: " PPC_LWARX(%0,0,%1,1) "\n"
+ __DO_SIGN_EXTEND
+-" addic. %0,%0,1\n\
+- ble- 2f\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,1\n"
++#endif
++
++"3:\n"
++ "ble- 4f\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+-"2:" : "=&r" (tmp)
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b,4b)
++#endif
++
++ : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
+
+@@ -310,11 +327,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+-"1: lwarx %0,0,%1\n\
+- addic %0,%0,-1\n"
++"1: lwarx %0,0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,-1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,-1\n"
++#endif
++
++"3:\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
+diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
+index da3cdff..c774844 100644
+--- a/arch/powerpc/include/asm/string.h
++++ b/arch/powerpc/include/asm/string.h
+@@ -11,17 +11,17 @@
+ #define __HAVE_ARCH_MEMCMP
+ #define __HAVE_ARCH_MEMCHR
+
+-extern char * strcpy(char *,const char *);
+-extern char * strncpy(char *,const char *, __kernel_size_t);
+-extern __kernel_size_t strlen(const char *);
+-extern int strcmp(const char *,const char *);
+-extern int strncmp(const char *, const char *, __kernel_size_t);
+-extern char * strcat(char *, const char *);
++extern char * strcpy(char *,const char *) __nocapture(2);
++extern char * strncpy(char *,const char *, __kernel_size_t) __nocapture(2);
++extern __kernel_size_t strlen(const char *) __nocapture(1);
++extern int strcmp(const char *,const char *) __nocapture();
++extern int strncmp(const char *, const char *, __kernel_size_t) __nocapture(1, 2);
++extern char * strcat(char *, const char *) __nocapture(2);
+ extern void * memset(void *,int,__kernel_size_t);
+-extern void * memcpy(void *,const void *,__kernel_size_t);
+-extern void * memmove(void *,const void *,__kernel_size_t);
+-extern int memcmp(const void *,const void *,__kernel_size_t);
+-extern void * memchr(const void *,int,__kernel_size_t);
++extern void * memcpy(void *,const void *,__kernel_size_t) __nocapture(2);
++extern void * memmove(void *,const void *,__kernel_size_t) __nocapture(2);
++extern int memcmp(const void *,const void *,__kernel_size_t) __nocapture(1, 2);
++extern void * memchr(const void *,int,__kernel_size_t) __nocapture(1);
+
+ #endif /* __KERNEL__ */
+
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 87e4b2d..c362390 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction requirements */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+- _TIF_NOHZ)
++ _TIF_NOHZ | _TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index c266227..f3dc6bb 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -58,6 +58,7 @@
+
+ #endif
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+@@ -303,43 +304,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n))) {
+- check_object_size(to, n, false);
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- }
+- memset(to, 0, n);
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- if (access_ok(VERIFY_WRITE, to, n)) {
+- check_object_size(from, n, true);
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -412,6 +376,70 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ if ((long)n < 0)
++ return n;
++
++ if (likely(access_ok(VERIFY_READ, from, n))) {
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((void __force_user *)to, from, n);
++ }
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ if ((long)n < 0)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (void __force_user *)from, n);
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_READ, from, n))) {
++ check_object_size(to, n, false);
++ n = __copy_from_user(to, from, n);
++ } else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 1925341..a1841ac 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -15,7 +15,7 @@ CFLAGS_btext.o += -fPIC
+ endif
+
+ CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+-CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
++CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+@@ -31,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_time.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+ endif
+
++CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++
+ obj-y := cputable.o ptrace.o syscalls.o \
+ irq.o align.o signal_32.o pmc.o vdso.o \
+ process.o systbl.o idle.o \
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 38a1f96..ed94e42 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1010,6 +1010,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -1018,8 +1019,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b ret_from_except_lite
+-1: bl save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1ba82ea..f78bd700 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1445,10 +1445,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ bl do_page_fault
+ cmpdi r3,0
+ beq+ 12f
+- bl save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 3c05c31..a8e6888 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -482,6 +482,8 @@ void migrate_irqs(void)
+ }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ static inline void check_stack_overflow(void)
+ {
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+@@ -494,6 +496,7 @@ static inline void check_stack_overflow(void)
+ pr_err("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index e785cc9..514488c 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -131,6 +131,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ kcb->kprobe_saved_msr = regs->msr;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -139,6 +140,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->link = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+@@ -547,6 +549,7 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline_p);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+@@ -554,3 +557,4 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 5a7a78f..c0e4207 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
++		pr_err("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
+
+ pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->core_layout.base
+- && location < mod->core_layout.base + mod->core_layout.size)
++ if ((location >= mod->core_layout.base_rx && location < mod->core_layout.base_rx + mod->core_layout.size_rx) ||
++ (location >= mod->core_layout.base_rw && location < mod->core_layout.base_rw + mod->core_layout.size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->init_layout.base_rx && location < mod->init_layout.base_rx + mod->init_layout.size_rx) ||
++ (location >= mod->init_layout.base_rw && location < mod->init_layout.base_rw + mod->init_layout.size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++		pr_err("%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+@@ -301,7 +306,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
+ {
+- module->arch.tramp = do_plt_call(module->core_layout.base,
++ module->arch.tramp = do_plt_call(module->core_layout.base_rx,
+ (unsigned long)ftrace_caller,
+ sechdrs, module);
+ if (!module->arch.tramp)
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 49a680d..2514bbcb 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1375,8 +1375,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1897,10 +1897,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth) && curr_frame >= 0) {
+- pr_cont(" (%pS)",
++ pr_cont(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1920,7 +1920,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
++ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1957,13 +1957,6 @@ void notrace __ppc64_runlatch_off(void)
+ }
+ #endif /* CONFIG_PPC64 */
+
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+ unsigned long rnd = 0;
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index 5c8f12f..98047fb 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -3151,7 +3151,7 @@ static int do_seccomp(struct pt_regs *regs)
+ * have already loaded -ENOSYS into r3, or seccomp has put
+ * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+ */
+- if (__secure_computing(NULL))
++ if (secure_computing(NULL))
+ return -1;
+
+ /*
+@@ -3169,6 +3169,10 @@ static int do_seccomp(struct pt_regs *regs)
+ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+ #endif /* CONFIG_SECCOMP */
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+@@ -3192,6 +3196,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /*
+ * The tracer may decide to abort the syscall, if so tracehook
+ * will return !0. Note that the tracer may also just change
+@@ -3210,6 +3219,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ if (regs->gpr[0] >= NR_syscalls)
+ goto skip;
+
++
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gpr[0]);
+
+@@ -3241,6 +3251,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+ int step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index 27aa913..dc0d9f5 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1006,7 +1006,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
+ sigret = 0;
+ tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
+ } else {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 96698fd..fe57485 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -791,7 +791,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
+ tsk->thread.fp_state.fpscr = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base != ~0UL) {
+ regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 023a462..9d940854 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -37,6 +37,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/uaccess.h>
+
+ #include <asm/emulated_ops.h>
+ #include <asm/pgtable.h>
+@@ -146,6 +147,8 @@ static unsigned long oops_begin(struct pt_regs *regs)
+ }
+ NOKPROBE_SYMBOL(oops_begin);
+
++extern void gr_handle_kernel_exploit(void);
++
+ static void oops_end(unsigned long flags, struct pt_regs *regs,
+ int signr)
+ {
+@@ -195,6 +198,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ do_exit(signr);
+ }
+ NOKPROBE_SYMBOL(oops_end);
+@@ -1162,6 +1168,26 @@ void program_check_exception(struct pt_regs *regs)
+ enum ctx_state prev_state = exception_enter();
+ unsigned int reason = get_reason(regs);
+
++#ifdef CONFIG_PAX_REFCOUNT
++ unsigned int bkpt;
++ const struct exception_table_entry *entry;
++
++ if (reason & REASON_ILLEGAL) {
++ /* Check if PaX bad instruction */
++ if (!probe_kernel_address((const void *)regs->nip, bkpt) && bkpt == 0xc00b00) {
++ current->thread.trap_nr = 0;
++ pax_report_refcount_error(regs, NULL);
++ /* fixup_exception() for PowerPC does not exist, simulate its job */
++ if ((entry = search_exception_tables(regs->nip)) != NULL) {
++ regs->nip = entry->fixup;
++ return;
++ }
++ /* fixup_exception() could not handle */
++ goto bail;
++ }
++ }
++#endif
++
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in that case the reason flags will be 0 */
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 4111d30..fa5e7be 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -35,6 +35,7 @@
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
+ #include <asm/setup.h>
++#include <asm/mman.h>
+
+ #undef DEBUG
+
+@@ -180,7 +181,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -201,7 +202,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index d0b137d..af92bde 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -34,6 +34,10 @@
+ #include <linux/context_tracking.h>
+ #include <linux/hugetlb.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -227,7 +258,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -384,12 +415,16 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+
+ if (is_exec) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (error_code & DSISR_GUARDED)
++ goto bad_area;
++#endif
+ /*
+ * Allow execution from readable areas if the MMU does not
+ * provide separate controls over reading and executing.
+@@ -484,6 +519,23 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ goto bail;
+ }
+diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
+index 2f1e443..de888bf 100644
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -194,6 +194,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -205,9 +209,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 2b27458..7c7c59b 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len, 0);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -276,6 +276,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+ info.align_offset = 0;
+
+ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr += mm->delta_mmap;
++#endif
++
+ while (addr < TASK_SIZE) {
+ info.low_limit = addr;
+ if (!slice_scan_available(addr, available, 1, &addr))
+@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+@@ -555,10 +566,10 @@ unsigned long arch_get_unmapped_area(struct file *filp,
+ }
+
+ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+- const unsigned long addr0,
+- const unsigned long len,
+- const unsigned long pgoff,
+- const unsigned long flags)
++ unsigned long addr0,
++ unsigned long len,
++ unsigned long pgoff,
++ unsigned long flags)
+ {
+ return slice_get_unmapped_area(addr0, len, flags,
+ current->mm->context.user_psize, 1);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index 0625446..139a0aa 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -263,9 +263,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return VM_FAULT_NOPAGE;
+ }
+
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+ unsigned long address,
+- void *buf, int len, int write)
++ void *buf, size_t len, int write)
+ {
+ struct spu_context *ctx = vma->vm_file->private_data;
+ unsigned long offset = address - vma->vm_start;
+diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
+index 26c5d5be..a308c28 100644
+--- a/arch/s390/Kconfig.debug
++++ b/arch/s390/Kconfig.debug
+@@ -9,6 +9,7 @@ config S390_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index d28cc2f..a937312 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -342,4 +342,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __ARCH_S390_ATOMIC__ */
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 05219a5..032f5f0 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -9,8 +9,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD 32
+
+ #define __read_mostly __section(.data..read_mostly)
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 1736c7d..261351c 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -167,6 +167,13 @@ extern unsigned int vdso_enabled;
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
+index c4a93d6..4d2a9b4 100644
+--- a/arch/s390/include/asm/exec.h
++++ b/arch/s390/include/asm/exec.h
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index a7ef702..8c25ce4 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
+ __range_ok((unsigned long)(addr), (size)); \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok(addr, size)
+
+ /*
+@@ -337,6 +338,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ return __copy_to_user(to, from, n);
+ }
+
+@@ -360,10 +365,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- unsigned int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+ might_fault();
+- if (unlikely(sz != -1 && sz < n)) {
++
++ if ((long)n < 0)
++ return n;
++
++ if (unlikely(sz != (size_t)-1 && sz < n)) {
+ if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index fdb4042..b72ae72 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -269,6 +269,7 @@ static void pop_kprobe(struct kprobe_ctlblk *kcb)
+ }
+ NOKPROBE_SYMBOL(pop_kprobe);
+
++#ifdef CONFIG_KRETPROBES
+ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ {
+ ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+@@ -277,6 +278,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
+ regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
++#endif
+
+ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
+ {
+@@ -740,8 +742,10 @@ int __init arch_init_kprobes(void)
+ return register_kprobe(&trampoline);
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int arch_trampoline_kprobe(struct kprobe *p)
+ {
+ return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
+ }
+ NOKPROBE_SYMBOL(arch_trampoline_kprobe);
++#endif
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index fbc0789..e7962a1 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -163,11 +163,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_layout.size = ALIGN(me->core_layout.size, 4);
+- me->arch.got_offset = me->core_layout.size;
+- me->core_layout.size += me->arch.got_size;
+- me->arch.plt_offset = me->core_layout.size;
+- me->core_layout.size += me->arch.plt_size;
++ me->core_layout.size_rw = ALIGN(me->core_layout.size_rw, 4);
++ me->arch.got_offset = me->core_layout.size_rw;
++ me->core_layout.size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_layout.size_rx;
++ me->core_layout.size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -283,7 +283,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->core_layout.base + me->arch.got_offset +
++ gotent = me->core_layout.base_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -306,7 +306,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT) {
+- val += (Elf_Addr) me->core_layout.base - loc;
++ val += (Elf_Addr) me->core_layout.base_rw - loc;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
+ }
+ break;
+@@ -319,7 +319,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->core_layout.base + me->arch.plt_offset +
++ ip = me->core_layout.base_rx + me->arch.plt_offset +
+ info->plt_offset;
+ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+ ip[1] = 0x100a0004;
+@@ -338,7 +338,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->core_layout.base +
++ val = (Elf_Addr) me->core_layout.base_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -360,7 +360,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->core_layout.base + me->arch.got_offset);
++ ((Elf_Addr) me->core_layout.base_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
+ else if (r_type == R_390_GOTOFF32)
+@@ -370,7 +370,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->core_layout.base + me->arch.got_offset +
++ val = (Elf_Addr) me->core_layout.base_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ rc = apply_rela_bits(loc, val, 1, 32, 0);
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index bba4fa7..9c32b3c 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -217,13 +217,6 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+ static inline unsigned long brk_rnd(void)
+ {
+ return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index eb9df28..7b686ba 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -201,9 +201,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+
+ static unsigned long
+-s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++s390_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct mm_struct *mm = current->mm;
+ unsigned long area;
+@@ -230,6 +230,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -239,9 +243,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mmap_base_legacy(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
+index f9f3cd5..58ff438 100644
+--- a/arch/score/include/asm/exec.h
++++ b/arch/score/include/asm/exec.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #endif /* _ASM_SCORE_EXEC_H */
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index aae9480..93e40a4 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -114,8 +114,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
+index 83acbf3..fa67491 100644
+--- a/arch/sh/kernel/kprobes.c
++++ b/arch/sh/kernel/kprobes.c
+@@ -72,6 +72,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (*p->addr == BREAKPOINT_INSTRUCTION)
+@@ -79,6 +80,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ /**
+ * If an illegal slot instruction exception occurs for an address
+@@ -203,6 +205,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+ }
+
+ /* Called with kretprobe_lock held */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -211,6 +214,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->pr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ static int __kprobes kprobe_handler(struct pt_regs *regs)
+ {
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index 6777177..d44b592 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+@@ -77,14 +81,15 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_colour_align)
+@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 165ecdd..2bac5bf 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -38,6 +38,7 @@ config SPARC
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
+ select MODULES_USE_ELF_RELA
++ select HAVE_GCC_PLUGINS
+ select ODD_RT_SIGACTION
+ select OLD_SIGSUSPEND
+ select ARCH_HAS_SG_CHAIN
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 24827a3..5dd45ac4 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -15,18 +15,38 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) READ_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+ #define atomic64_read(v) READ_ONCE((v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return READ_ONCE(v->counter);
++}
+
+ #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+ #define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ WRITE_ONCE(v->counter, i);
++}
+
+-#define ATOMIC_OP(op) \
+-void atomic_##op(int, atomic_t *); \
+-void atomic64_##op(long, atomic64_t *);
++#define __ATOMIC_OP(op, suffix) \
++void atomic_##op##suffix(int, atomic##suffix##_t *); \
++void atomic64_##op##suffix(long, atomic64##suffix##_t *);
+
+-#define ATOMIC_OP_RETURN(op) \
+-int atomic_##op##_return(int, atomic_t *); \
+-long atomic64_##op##_return(long, atomic64_t *);
++#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
++
++#define __ATOMIC_OP_RETURN(op, suffix) \
++int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
++long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
+
+ #define ATOMIC_FETCH_OP(op) \
+ int atomic_fetch_##op(int, atomic_t *); \
+@@ -47,13 +67,23 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_dec_return(v) atomic_sub_return(1, v)
+ #define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+ #define atomic_inc_return(v) atomic_add_return(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_return(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -64,6 +94,10 @@ ATOMIC_OPS(xor)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -73,25 +107,60 @@ ATOMIC_OPS(xor)
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -101,21 +170,42 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 5bb6991..5c2132e 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index a24e41f..47677ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 3f2d403..4385ed9 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -190,6 +190,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index 0346c7e..c5c25b9 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+ }
+
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 3529f13..d98a28c 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+ }
+
+ #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+ }
+
+ #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
+index 59ba6f6..4518128 100644
+--- a/arch/sparc/include/asm/pgtable.h
++++ b/arch/sparc/include/asm/pgtable.h
+@@ -5,4 +5,8 @@
+ #else
+ #include <asm/pgtable_32.h>
+ #endif
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #endif
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index ce6f569..593b043 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
+ #define PAGE_SHARED SRMMU_PAGE_SHARED
+ #define PAGE_COPY SRMMU_PAGE_COPY
+ #define PAGE_READONLY SRMMU_PAGE_RDONLY
++#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
++#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
++#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
+ #define PAGE_KERNEL SRMMU_PAGE_KERNEL
+
+ /* Top-level page directory - dummy used by init-mm.
+@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
+
+ /* xwr */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
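The __Pxxx/__Sxxx tables translate the mmap protection bits (the /* xwr */ comment gives the bit order) into SRMMU page protections for private (__P, copy-on-write) and shared (__S) mappings. The hunk routes every combination lacking the execute bit to the new *_NOEXEC entries, so a plain PROT_READ|PROT_WRITE mapping no longer carries SRMMU_EXEC implicitly, which is what makes PAX_PAGEEXEC enforceable in the sparc32 page tables. The tables are consumed by the generic vm_get_page_prot(), roughly:

    /* Paraphrased from mm/mmap.c: protection_map[] is the 16-entry
     * array built from __P000..__P111 and __S000..__S111, indexed by
     * the low VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits.  With the hunk
     * above, a private rw mapping now resolves to PAGE_COPY_NOEXEC. */
    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }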
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index ae51a11..eadfd03 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -111,6 +111,11 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 29d64b1..4272fe8 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+ void handle_ld_nf(u32 insn, struct pt_regs *regs);
+
+ /* init_64.c */
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+
+ extern int sysctl_tsb_ratio;
+ #endif
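dcpage_flushes and dcpage_flushes_xcall are debug-only statistics (CONFIG_DEBUG_DCFLUSH), so the patch retypes them as atomic_unchecked_t: a grsecurity type with the same layout as atomic_t whose operations are generated without the overflow trap, exempting counters that are allowed to wrap from PAX_REFCOUNT. A simplified rendering of the type (the real definitions are elsewhere in the main 4420_ patch):

    /* Simplified: same layout as atomic_t, helpers never trap. */
    typedef struct {
        int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED); /* may wrap */
    }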
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 07c9f2e..352fff0 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -103,7 +103,12 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -116,7 +121,7 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+ static inline int arch_read_trylock(arch_rwlock_t *lock)
+@@ -127,7 +132,12 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -146,7 +156,12 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index 229475f..2fca9163 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -48,6 +48,7 @@ struct thread_info {
+ struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
+ unsigned long rwbuf_stkptrs[NSWINS];
+ unsigned long w_saved;
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index 3d7b925..493ce82 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -59,6 +59,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
+ __attribute__ ((aligned(64)));
+ };
+@@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ /* flag bit 4 is available */
+ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
+-/* flag bit 6 is available */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+ #define TIF_32BIT 7 /* 32-bit binary */
+ #define TIF_NOHZ 8 /* in adaptive nohz mode */
+ #define TIF_SECCOMP 9 /* secure computing */
+ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+ * in using in assembly, else we can't use the mask as
+ * an immediate value in instructions such as andcc.
+@@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+ _TIF_DO_NOTIFY_RESUME_MASK | \
+ _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+
++#define _TIF_WORK_SYSCALL \
++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
++
+ #define is_32bit_task() (test_thread_flag(TIF_32BIT))
+
+ /*
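Two things land in thread_info here: a lowest_stack field (also added to the 32-bit variant above), which PAX_MEMORY_STACKLEAK uses to track how deep the kernel stack has grown, and TIF_GRSEC_SETXID, which marks a task whose credentials must be applied at the next syscall boundary. _TIF_WORK_SYSCALL then collects every flag the syscall entry/exit assembly must react to, so the syscalls.S hunks further down can test one mask instead of a five-flag literal and new work bits are picked up automatically. In C terms, the assembly's andcc amounts to:

    /* C rendering of the andcc test the entry stub performs: */
    if (current_thread_info()->flags & _TIF_WORK_SYSCALL)
        syscall_trace_enter(regs);  /* trace/seccomp/audit/setxid path */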
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index bd56c28..4b63d83 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index ea55f86..dbf15cf 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -47,6 +47,7 @@
+ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ ({ (void)(type); __access_ok((unsigned long)(addr), size); })
+
+@@ -248,6 +249,9 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) to, n)) {
+ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+@@ -257,12 +261,18 @@ static inline unsigned long copy_to_user(void __user *to, const void *from, unsi
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (n && __access_ok((unsigned long) from, n)) {
+ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+@@ -274,6 +284,9 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 5373136..c528f7e 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -77,6 +78,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+ return 1;
+ }
+
++static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
++{
++ return 1;
++}
++
+ static inline int access_ok(int type, const void __user * addr, unsigned long size)
+ {
+ return 1;
+@@ -191,6 +197,9 @@ unsigned long __must_check ___copy_from_user(void *to,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
+ check_object_size(to, size, false);
+
+ return ___copy_from_user(to, from, size);
+@@ -203,6 +212,9 @@ unsigned long __must_check ___copy_to_user(void __user *to,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
+ check_object_size(from, size, true);
+
+ return ___copy_to_user(to, from, size);
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index fa3c02d..c9a6309 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -4,7 +4,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+
+diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
+index b0377db..1da3b53 100644
+--- a/arch/sparc/kernel/kprobes.c
++++ b/arch/sparc/kernel/kprobes.c
+@@ -499,6 +499,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ * value kept in ri->ret_addr so we don't need to keep adjusting it
+ * back and forth.
+ */
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -508,6 +509,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ regs->u_regs[UREG_RETPC] =
+ ((unsigned long)kretprobe_trampoline) - 8;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index b7780a5..28315f0 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
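The %pS to %pA conversions here recur through the rest of the sparc files. %pA is not a mainline printk format; it is added by the main grsecurity patch as a variant of %pS for symbolizing kernel text addresses, by all appearances applying the ktla_ktva() translation from the pgtable.h hunk above first (an identity on sparc, so the output is unchanged there). Assumed shape of the handler, a hypothetical rendering of what the 4420_ patch adds to pointer() in lib/vsprintf.c:

    /* Assumption, for illustration only: */
    case 'A':
        return symbol_string(buf, end,
                             (void *)ktla_ktva((unsigned long)ptr),
                             spec, fmt);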
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 47ff558..2333c8a 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
+
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -278,7 +278,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 79cc0d1..46d6233 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+
+ unsigned int prom_early_allocated __initdata;
+
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static const struct of_pdt_ops prom_sparc_ops __initconst = {
+ .nextprop = prom_common_nextprop,
+ .getproplen = prom_getproplen,
+ .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 7037ca3..070b51b 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1068,6 +1068,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+ int ret = 0;
+@@ -1078,6 +1082,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+@@ -1096,6 +1105,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 8182f7c..a5ab37f 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -895,7 +895,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ return;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ this_cpu = get_cpu();
+@@ -919,7 +919,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ }
+@@ -938,7 +938,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_disable();
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ data0 = 0;
+ pg_addr = page_address(page);
+@@ -955,7 +955,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ __local_flush_dcache_page(page);
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 646988d..b88905f 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (len > TASK_SIZE - PAGE_SIZE)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ info.flags = 0;
+ info.length = len;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index fe8b8ee..3f17a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ struct vm_area_struct * vma;
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOR_ALIGN(addr, pgoff);
+@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -141,15 +152,16 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+
+ unsigned long
+-arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len, const unsigned long pgoff,
+- const unsigned long flags)
++arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len, unsigned long pgoff,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /* This should only ever run for 32-bit processes. */
+@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+
+ /* Essentially the same as PowerPC. */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+ unsigned long rnd = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ unsigned long val = get_random_long();
+ if (test_thread_flag(TIF_32BIT))
+@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
+
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- unsigned long random_factor = mmap_rnd();
++ unsigned long random_factor = mmap_rnd(mm);
+ unsigned long gap;
+
+ /*
+@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ /* We know it's 32-bit */
+@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
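Under MF_PAX_RANDMMAP these hunks skip caller-supplied address hints, search from the already-randomized mm->mmap_base, shift even the fallback window by delta_mmap, and thread gr_rand_threadstack_offset() through info.threadstack_offset so check_heap_stack_gap() can keep a randomized guard gap between new mappings and the stack. With the elf_64.h values above (PAX_DELTA_MMAP_LEN of 28 for 64-bit tasks) and sparc64's 8 KiB pages, the randomization window works out to:

    /* 2^28 page-granular offsets, each page 2^13 bytes: */
    unsigned long window = 1UL << (28 + 13);   /* 2^41 bytes = 2 TiB */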
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index c4a1b5c..c5e0ef3 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+ .align 32
+ 1: ldx [%g6 + TI_FLAGS], %l5
+- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l5, _TIF_WORK_SYSCALL, %g0
+ be,pt %icc, rtrap
+ nop
+ call syscall_trace_leave
+@@ -230,7 +230,7 @@ linux_sparc_syscall32:
+
+ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+@@ -254,7 +254,7 @@ linux_sparc_syscall:
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+ 2: call %l7 ! CTI Group brk forced
+@@ -269,7 +269,7 @@ ret_sys_call:
+
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+ 2:
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 4f21df7..0a374da 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 4094a51..4a360da 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_error(regs, NULL);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_error(regs, NULL);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
+ /*0x38*/u64 reserved_5;
+ };
+
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+ static const char *sun4v_err_type_to_str(u8 type)
+ {
+@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
+ }
+
+ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+- int cpu, const char *pfx, atomic_t *ocnt)
++ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+ u64 *raw_ptr = (u64 *) ent;
+ u32 attrs;
+@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+
+ show_regs(regs);
+
+- if ((cnt = atomic_read(ocnt)) != 0) {
+- atomic_set(ocnt, 0);
++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++ atomic_set_unchecked(ocnt, 0);
+ wmb();
+ printk("%s: Queue overflowed %d times.\n",
+ pfx, cnt);
+@@ -2048,7 +2059,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
+ */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+- atomic_inc(&sun4v_resum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ /* XXX Actually even this can make not that much sense. Perhaps
+ * XXX we should just pull the plug and panic directly from here?
+ */
+- atomic_inc(&sun4v_nonresum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ if (panic_on_oops)
+ panic("Fatal exception");
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
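This file is where the REFCOUNT traps land: the tvs instructions enter the kernel as software trap 6, bad_trap()/bad_trap_tl1() see lvl == 6 after the "lvl -= 0x100" adjustment and report it via pax_report_refcount_error() before killing the offender, and die_if_kernel() now also invokes gr_handle_kernel_exploit() for faults raised in privileged state. Assembling the pieces from the earlier hunks:

    /* Detected refcount overflow, end to end (sparc64):
     *   addcc %g1, %o0, %g7      - add, setting condition codes
     *   tvs   %xcc, 6            - software trap 6 on signed overflow
     *   -> bad_trap(regs, 0x106) - lvl -= 0x100 leaves lvl == 6
     *   -> pax_report_refcount_error(regs, NULL)
     *   -> die_if_kernel() -> gr_handle_kernel_exploit() if TSTATE_PRIV
     */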
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 52c00d9..6f8aa4e 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 69912d2..6c0c227 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 1c6a1bd..93e9698 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -17,11 +17,22 @@
+ * barriers.
+ */
+
+-#define ATOMIC_OP(op) \
+-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##cc
++#define __OVERFLOW_IOP tvs %icc, 6;
++#define __OVERFLOW_XOP tvs %xcc, 6;
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_IOP
++#define __OVERFLOW_XOP
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+@@ -29,14 +40,18 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ nop; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic_##op); \
+-EXPORT_SYMBOL(atomic_##op);
++ENDPROC(atomic_##op##suffix); \
++EXPORT_SYMBOL(atomic_##op##suffix);
+
+-#define ATOMIC_OP_RETURN(op) \
+-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC_OP(op) __ATOMIC_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
++ __ATOMIC_OP(op, _unchecked, op, )
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+ op %g1, %o0, %g1; \
+ retl; \
+ sra %g1, 0, %o0; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic_##op##_return); \
+-EXPORT_SYMBOL(atomic_##op##_return);
++ENDPROC(atomic_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic_##op##_return##suffix);
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_IOP) \
++ __ATOMIC_OP_RETURN(op, _unchecked, op, )
+
+ #define ATOMIC_FETCH_OP(op) \
+ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -77,13 +95,16 @@ ATOMIC_OPS(xor)
+ #undef ATOMIC_OPS
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+-#define ATOMIC64_OP(op) \
+-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+@@ -91,14 +112,18 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ retl; \
+ nop; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic64_##op); \
+-EXPORT_SYMBOL(atomic64_##op);
++ENDPROC(atomic64_##op##suffix); \
++EXPORT_SYMBOL(atomic64_##op##suffix);
+
+-#define ATOMIC64_OP_RETURN(op) \
+-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
++ __ATOMIC64_OP(op, _unchecked, op, )
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+ nop; \
+ retl; \
+ op %g1, %o0, %o0; \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+-ENDPROC(atomic64_##op##_return); \
+-EXPORT_SYMBOL(atomic64_##op##_return);
++ENDPROC(atomic64_##op##_return##suffix); \
++EXPORT_SYMBOL(atomic64_##op##_return##suffix);
++
++#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , __REFCOUNT_OP(op), __OVERFLOW_XOP) \
++ __ATOMIC64_OP_RETURN(op, _unchecked, op, )
+
+ #define ATOMIC64_FETCH_OP(op) \
+ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+@@ -139,7 +167,12 @@ ATOMIC64_OPS(xor)
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_XOP
++#undef __OVERFLOW_IOP
++#undef __REFCOUNT_OP
+
+ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
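The suffix-parameterized macros generate every atomic op twice from one template: the default entry (empty suffix) takes __REFCOUNT_OP(op), i.e. the condition-code-setting op##cc plus a tvs trap when PAX_REFCOUNT is enabled, while the _unchecked entry keeps the plain op for counters that may legitimately wrap. Preprocessed, ATOMIC_OP(add) therefore yields roughly (abbreviated; BACKOFF and branch plumbing omitted):

    ! What ATOMIC_OP(add) expands to with CONFIG_PAX_REFCOUNT=y:
    ENTRY(atomic_add)              ! checked variant
        lduw  [%o1], %g1
        addcc %g1, %o0, %g7        ! add, sets %icc
        tvs   %icc, 6              ! trap on signed overflow
        cas   [%o1], %g1, %g7
        ...
    ENTRY(atomic_add_unchecked)    ! wrap-allowed variant
        lduw  [%o1], %g1
        add   %g1, %o0, %g7        ! no check
        cas   [%o1], %g1, %g7
        ...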
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 30c3ecc..736f015 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index 4714061..bad7f9a 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned int addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -226,6 +500,24 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
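pax_handle_fetch_fault() is the userland side of PAGEEXEC: when an instruction fetch faults in a non-executable mapping, it inspects the faulting instructions and, if they form a known PLT trampoline, emulates the control transfer instead of killing the task (return codes 2 and 3); anything unrecognized returns 1, and the caller logs the context via pax_report_fault()/pax_report_insns() and kills the task group. The bit-fiddling decodes fixed SPARC instruction encodings; for example, the sethi/jmpl pair of "patched PLT emulation #1" above works out to:

    /* Decoding used by pattern #1 (sethi2 and jmpl as read above):
     * sethi %hi(X), %g1   puts X's top 22 bits in the low 22 bits;
     * jmpl  %g1 + simm13  adds a sign-extended 13-bit immediate. */
    unsigned int hi22   = (sethi2 & 0x003FFFFFU) << 10;       /* %hi(X) */
    unsigned int simm13 = ((jmpl | 0xFFFFE000U) ^ 0x00001000U)
                          + 0x00001000U;   /* sign-extend low 13 bits */
    unsigned int target = hi22 + simm13;   /* wraparound handles the
                                              negative case */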
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 643c149..845c113 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -23,6 +23,9 @@
+ #include <linux/percpu.h>
+ #include <linux/context_tracking.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -276,6 +279,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned long addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ enum ctx_state prev_state = exception_enter();
+@@ -350,6 +813,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
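The PLT-emulation code above decodes SPARC instruction fields by hand: a sethi places its 22-bit immediate into bits 31..10 of the destination register, and call/branch displacements are sign-extended word offsets, extended branchlessly with the ((x | mask) ^ bias) + bias idiom that recurs throughout the hunk. A standalone C sketch of the two decodings (illustrative only, not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* sethi imm22: the immediate lands in bits 31..10 of the register */
    static uint64_t sethi_imm(uint32_t insn)
    {
        return (uint64_t)(insn & 0x003FFFFFU) << 10;
    }

    /* call disp30: or in the high bits, then flip-and-add the sign bit
     * to sign-extend, exactly as the emulation above does, and scale
     * from words to bytes */
    static int64_t call_disp(uint32_t insn)
    {
        uint64_t d = insn | 0xFFFFFFFFC0000000UL;
        return (int64_t)(((d ^ 0x20000000UL) + 0x20000000UL) << 2);
    }

    int main(void)
    {
        printf("%lld\n", (long long)call_disp(0x7FFFFFFFU));           /* disp30 -1 -> -4 bytes */
        printf("%#llx\n", (unsigned long long)sethi_imm(0x03000001U)); /* -> 0x400 */
        return 0;
    }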
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 988acc8b..f26345c 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -26,8 +26,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+- unsigned long flags)
++ unsigned long flags,
++ unsigned long offset)
+ {
++ struct mm_struct *mm = current->mm;
+ unsigned long task_size = TASK_SIZE;
+ struct vm_unmapped_area_info info;
+
+@@ -36,15 +38,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -53,10 +62,11 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ }
+
+ static unsigned long
+-hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+- const unsigned long len,
+- const unsigned long pgoff,
+- const unsigned long flags)
++hugetlb_get_unmapped_area_topdown(struct file *filp, unsigned long addr0,
++ unsigned long len,
++ unsigned long pgoff,
++ unsigned long flags,
++ unsigned long offset)
+ {
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+@@ -71,6 +81,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -83,6 +94,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -97,6 +114,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+@@ -112,19 +130,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ }
+
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
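The hugetlbpage.c hunk above replaces the open-coded `!vma || addr + len <= vma->vm_start` test with grsecurity's check_heap_stack_gap() and threads a per-mapping random threadstack_offset through both allocators. The helper's exact policy lives elsewhere in the patch; the fragment below is only an assumed sketch of its shape (GUARD_GAP, the struct, and the gap rule are inventions for illustration, and only the argument convention matches the hunk):

    #define GUARD_GAP (256UL << 12)     /* assumed gap size */

    struct vma_stub {
        unsigned long vm_start;
        int grows_down;                 /* stands in for VM_GROWSDOWN */
    };

    static int gap_ok(const struct vma_stub *vma, unsigned long addr,
                      unsigned long len, unsigned long offset)
    {
        if (!vma)
            return 1;                   /* nothing above the candidate */
        if (addr + len > vma->vm_start)
            return 0;                   /* overlaps the next mapping */
        if (vma->grows_down &&
            vma->vm_start - (addr + len) < GUARD_GAP + offset)
            return 0;                   /* too close to a stack VMA */
        return 1;
    }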
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 37aa537..06b756c 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -189,9 +189,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+
+@@ -199,7 +199,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+ BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -462,10 +462,10 @@ void mmu_info(struct seq_file *m)
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+- atomic_read(&dcpage_flushes));
++ atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+- atomic_read(&dcpage_flushes_xcall));
++ atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
+diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
+index 4583c03..5e074bb 100644
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -192,6 +192,7 @@ source "kernel/Kconfig.hz"
+ config KEXEC
+ bool "kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index 4cefa0c..98d8b83 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -195,6 +195,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_TILE_ATOMIC_64_H */
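Here, as in the sparc init_64.c hunk above, statistics counters move from atomic_t to atomic_unchecked_t: PaX's REFCOUNT hardening traps on overflow of ordinary atomics to catch reference-count bugs, so counters that are allowed to wrap must be marked unchecked. On architectures without the instrumented atomics, tile among them, the unchecked ops simply alias the plain ones, as the defines above show. Kernel-context usage looks like this (a sketch using the API as it appears in the hunks):

    /* a wrapping statistics counter vs. an overflow-checked refcount */
    static atomic_unchecked_t flush_count = ATOMIC_INIT(0);
    static atomic_t obj_refs = ATOMIC_INIT(1);

    static void on_flush(void)
    {
        atomic_inc_unchecked(&flush_count); /* may wrap, by design */
        atomic_inc(&obj_refs);              /* overflow traps under REFCOUNT */
    }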
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 4810e48..08b733b 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/chip.h>
+
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
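The cache.h hunks in this patch rewrite (1 << L1_CACHE_SHIFT) as (_AC(1,UL) << L1_CACHE_SHIFT). _AC() from <linux/const.h> pastes the type suffix in C but drops it in assembly, so a single definition yields an unsigned long constant in C code and a bare literal in .S files, keeping derived masks and sizes out of plain int. Abridged from the real header:

    /* include/uapi/linux/const.h (abridged) */
    #ifdef __ASSEMBLY__
    #define _AC(X,Y)    X               /* "UL" would not assemble */
    #else
    #define __AC(X,Y)   (X##Y)
    #define _AC(X,Y)    __AC(X,Y)       /* _AC(1,UL) -> (1UL) */
    #endif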
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index a77369e..7ba6ecd 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -428,9 +428,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
++ if (likely(sz == (size_t)-1 || sz >= n))
+ n = _copy_from_user(to, from, n);
+ else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
+index c68694b..12bf0cb 100644
+--- a/arch/tile/kernel/kprobes.c
++++ b/arch/tile/kernel/kprobes.c
+@@ -430,6 +430,7 @@ static void __used kretprobe_trampoline_holder(void)
+
+ void kretprobe_trampoline(void);
+
++#ifdef CONFIG_KRETPROBES
+ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+ {
+@@ -438,6 +439,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ /* Replace the return addr with trampoline addr */
+ regs->lr = (unsigned long)kretprobe_trampoline;
+ }
++#endif
+
+ /*
+ * Called when the probe at kretprobe trampoline is hit.
+@@ -507,6 +509,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ return 1;
+ }
+
++#ifdef CONFIG_KRETPROBES
+ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+ {
+ if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
+@@ -514,6 +517,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+
+ return 0;
+ }
++#endif
+
+ static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)kretprobe_trampoline,
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index 77ceaa3..3630dea 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ info.high_limit = current->mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 0ca46ede..8d7fd38 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -73,6 +73,8 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
+ -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \
+ -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__
+
++USER_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(USER_CFLAGS))
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(ARCH_DIR)/Makefile-os-$(OS)
+
+diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
+index 6208702..00292c8 100644
+--- a/arch/um/drivers/line.c
++++ b/arch/um/drivers/line.c
+@@ -377,7 +377,7 @@ int setup_one_line(struct line *lines, int n, char *init,
+ struct tty_driver *driver = line->driver->driver;
+ int err = -EINVAL;
+
+- if (line->port.count) {
++ if (atomic_read(&line->port.count)) {
+ *error_out = "Device is already open";
+ goto out;
+ }
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 2e0a6b1..a64d0f5 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -8,6 +8,6 @@
+
+ /* No more #include "asm/arch/kmap_types.h" ! */
+
+-#define KM_TYPE_NR 14
++#define KM_TYPE_NR 15
+
+ #endif
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index f878bec..ca09300 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index bae8523..ba9484b 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 034b42c7..5c186ce 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -343,22 +343,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/exec.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index bada636..1775eac 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -39,14 +39,13 @@ config X86
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+- select ARCH_SUPPORTS_INT128 if X86_64
++ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW_EXTRA && !PAX_SIZE_OVERFLOW
+ select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if X86_64
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select BUILDTIME_EXTABLE_SORT
+@@ -94,7 +93,7 @@ config X86
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
+ select HAVE_EBPF_JIT if X86_64
+ select HAVE_ARCH_VMAP_STACK if X86_64
+- select HAVE_CC_STACKPROTECTOR
++ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
+ select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
+ select HAVE_CONTEXT_TRACKING if X86_64
+@@ -136,6 +135,7 @@ config X86
+ select HAVE_NMI
+ select HAVE_OPROFILE
+ select HAVE_OPTPROBES
++ select HAVE_PAX_INITIFY_INIT_EXIT if GCC_PLUGINS
+ select HAVE_PCSPKR_PLATFORM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_EVENTS_NMI
+@@ -190,11 +190,13 @@ config MMU
+ def_bool y
+
+ config ARCH_MMAP_RND_BITS_MIN
+- default 28 if 64BIT
++ default 28 if 64BIT && !PAX_PER_CPU_PGD
++ default 27 if 64BIT && PAX_PER_CPU_PGD
+ default 8
+
+ config ARCH_MMAP_RND_BITS_MAX
+- default 32 if 64BIT
++ default 32 if 64BIT && !PAX_PER_CPU_PGD
++ default 27 if 64BIT && PAX_PER_CPU_PGD
+ default 16
+
+ config ARCH_MMAP_RND_COMPAT_BITS_MIN
+@@ -296,7 +298,7 @@ config X86_64_SMP
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_SUPPORTS_UPROBES
+ def_bool y
+@@ -690,6 +692,7 @@ config SCHED_OMIT_FRAME_POINTER
+
+ menuconfig HYPERVISOR_GUEST
+ bool "Linux guest support"
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
+ ---help---
+ Say Y here to enable options for running Linux under various hyper-
+ visors. This option enables basic hypervisor detection and platform
+@@ -1090,6 +1093,7 @@ config VM86
+
+ config X86_16BIT
+ bool "Enable support for 16-bit segments" if EXPERT
++ depends on !GRKERNSEC
+ default y
+ depends on MODIFY_LDT_SYSCALL
+ ---help---
+@@ -1244,6 +1248,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1280,6 +1285,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1332,7 +1338,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1353,7 +1359,6 @@ config X86_PAE
+
+ config ARCH_PHYS_ADDR_T_64BIT
+ def_bool y
+- depends on X86_64 || X86_PAE
+
+ config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+@@ -1484,7 +1489,7 @@ config ARCH_PROC_KCORE_TEXT
+
+ config ILLEGAL_POINTER_VALUE
+ hex
+- default 0 if X86_32
++ default 0xfffff000 if X86_32
+ default 0xdead000000000000 if X86_64
+
+ source "mm/Kconfig"
+@@ -1807,6 +1812,7 @@ source kernel/Kconfig.hz
+ config KEXEC
+ bool "kexec system call"
+ select KEXEC_CORE
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -1934,7 +1940,7 @@ config RELOCATABLE
+
+ config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image (KASLR)"
+- depends on RELOCATABLE
++ depends on RELOCATABLE && BROKEN_SECURITY
+ default n
+ ---help---
+ In support of Kernel Address Space Layout Randomization (KASLR),
+@@ -1978,7 +1984,9 @@ config X86_NEED_RELOCS
+
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned"
+- default "0x200000"
++ default "0x1000000"
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+ range 0x2000 0x1000000 if X86_32
+ range 0x200000 0x1000000 if X86_64
+ ---help---
+@@ -2093,6 +2101,7 @@ config COMPAT_VDSO
+ def_bool n
+ prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Certain buggy versions of glibc will crash if they are
+ presented with a 32-bit vDSO that is not mapped at the address
+@@ -2133,15 +2142,6 @@ choice
+
+ If unsure, select "Emulate".
+
+- config LEGACY_VSYSCALL_NATIVE
+- bool "Native"
+- help
+- Actual executable code is located in the fixed vsyscall
+- address mapping, implementing time() efficiently. Since
+- this makes the mapping executable, it can be used during
+- security vulnerability exploitation (traditionally as
+- ROP gadgets). This configuration is not recommended.
+-
+ config LEGACY_VSYSCALL_EMULATE
+ bool "Emulate"
+ help
+@@ -2222,6 +2222,22 @@ config MODIFY_LDT_SYSCALL
+
+ Saying 'N' here may make sense for embedded or server kernels.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ source "kernel/livepatch/Kconfig"
+
+ endmenu
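DEFAULT_MODIFY_LDT_SYSCALL only picks the boot-time default; per the help text above, the sys.kernel.modify_ldt sysctl flips it at runtime. A userspace probe for whether the syscall is currently permitted could look like this (a sketch: the exact errno returned when the sysctl disables it, e.g. ENOSYS, is an assumption):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    int main(void)
    {
        unsigned char buf[8];
        /* modify_ldt func 0 reads the LDT: a harmless probe */
        long ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));

        if (ret < 0)
            printf("modify_ldt blocked or absent: %s\n", strerror(errno));
        else
            printf("modify_ldt allowed, read %ld bytes\n", ret);
        return 0;
    }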
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 3ba5ff2..44bdacc 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -329,7 +329,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486
++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -337,7 +337,7 @@ config X86_INVD_BUG
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -379,7 +379,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 67eec55..1a5c1ab 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -55,6 +55,7 @@ config X86_PTDUMP
+ tristate "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ select X86_PTDUMP_CORE
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+@@ -84,6 +85,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
++ depends on BROKEN
+ select X86_PTDUMP_CORE
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+@@ -111,7 +113,7 @@ config DEBUG_WX
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+@@ -353,6 +355,7 @@ config X86_DEBUG_FPU
+ config PUNIT_ATOM_DEBUG
+ tristate "ATOM Punit debug driver"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ select IOSF_MBI
+ ---help---
+ This is a debug driver, which gets the power states
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 2d44933..86ecceb 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -75,9 +75,6 @@ ifeq ($(CONFIG_X86_32),y)
+ # CPU-specific tuning. Anything which can be shared with UML should go here.
+ include arch/x86/Makefile_32.cpu
+ KBUILD_CFLAGS += $(cflags-y)
+-
+- # temporary until string.h is fixed
+- KBUILD_CFLAGS += -ffreestanding
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
+@@ -126,6 +123,9 @@ else
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
+ endif
+
++# temporary until string.h is fixed
++KBUILD_CFLAGS += -ffreestanding
++
+ ifdef CONFIG_X86_X32
+ x32_ld_ok := $(call try-run,\
+ /bin/echo -e '1: .quad 1b' | \
+@@ -192,6 +192,7 @@ archheaders:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
+
+ archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+ $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
+@@ -278,3 +279,9 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 12ea8f8..46969be 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -11,6 +11,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Kernel does not boot with kcov instrumentation here.
+ # One of the problems observed was insertion of __sanitizer_cov_trace_pc()
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 0d41d68..2d6120c 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -28,7 +28,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+ bool v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -39,7 +39,7 @@ static inline bool variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index e5612f3..e755d05 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -84,7 +84,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
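The bitops.h and boot.h hunks add volatile to inline asm that has output operands. Without it, GCC treats the asm as a pure function of its inputs and may CSE it, hoist it out of loops, or delete it when the result looks unused, which is wrong for instructions that read mutable machine state such as segment registers or MSRs (the cpucheck.c hunk further down makes the same fix). A minimal x86-only illustration (not from the patch):

    #include <stdio.h>

    static unsigned short read_ds(void)
    {
        unsigned short seg;
        /* plain asm() here could be folded across calls; volatile
         * forces the instruction to be emitted every time */
        asm volatile("movw %%ds,%0" : "=rm" (seg));
        return seg;
    }

    int main(void)
    {
        printf("ds=%#x\n", read_ds());
        return 0;
    }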
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 34d9e15..c26e047 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -18,6 +18,7 @@
+
+ KASAN_SANITIZE := n
+ OBJECT_FILES_NON_STANDARD := y
++GCC_PLUGINS := n
+
+ # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+ KCOV_INSTRUMENT := n
+@@ -35,6 +36,23 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+
++ifdef CONFIG_DEBUG_INFO
++ifdef CONFIG_DEBUG_INFO_SPLIT
++KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
++else
++KBUILD_CFLAGS += -g
++endif
++KBUILD_AFLAGS += -Wa,--gdwarf-2
++endif
++ifdef CONFIG_DEBUG_INFO_DWARF4
++KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
++endif
++
++ifdef CONFIG_DEBUG_INFO_REDUCED
++KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
++ $(call cc-option,-fno-var-tracking)
++endif
++
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+ UBSAN_SANITIZE :=n
+diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
+index a53440e..c3dbf1e 100644
+--- a/arch/x86/boot/compressed/efi_stub_32.S
++++ b/arch/x86/boot/compressed/efi_stub_32.S
+@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %ecx
+- movl %ecx, saved_return_addr(%edx)
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr(%edx)
++ popl saved_return_addr(%edx)
++ popl efi_rt_function_ptr(%edx)
+
+ /*
+ * 3. Call the physical function.
+ */
+- call *%ecx
++ call *efi_rt_function_ptr(%edx)
+
+ /*
+ * 4. Balance the stack. And because EAX contain the return value,
+@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
+ 1: popl %edx
+ subl $1b, %edx
+
+- movl efi_rt_function_ptr(%edx), %ecx
+- pushl %ecx
++ pushl efi_rt_function_ptr(%edx)
+
+ /*
+ * 10. Push the saved return address onto the stack and return.
+ */
+- movl saved_return_addr(%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *saved_return_addr(%edx)
+ ENDPROC(efi_call_phys)
+ .previous
+
+diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S
+index 99494dff..7fa59bf 100644
+--- a/arch/x86/boot/compressed/efi_stub_64.S
++++ b/arch/x86/boot/compressed/efi_stub_64.S
+@@ -2,4 +2,5 @@
+ #include <asm/msr.h>
+ #include <asm/processor-flags.h>
+
++#define efi_call efi_call_early
+ #include "../../platform/efi/efi_stub_64.S"
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+index 630384a..278e788 100644
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -189,8 +189,8 @@ efi_gdt64:
+ .long 0 /* Filled out by user */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ efi_gdt64_end:
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index fd0b6a2..7206864 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -169,10 +169,10 @@ preferred_addr:
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index efdfba2..af6d962 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -103,10 +103,10 @@ ENTRY(startup_32)
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -333,10 +333,10 @@ preferred_addr:
+ addq %rax, %rbp
+ notq %rax
+ andq %rax, %rbp
+- cmpq $LOAD_PHYSICAL_ADDR, %rbp
++ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
+ #endif
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -444,8 +444,8 @@ gdt:
+ .long gdt
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ gdt_end:
+@@ -465,7 +465,7 @@ efi32_config:
+ .global efi64_config
+ efi64_config:
+ .fill 4,8,0
+- .quad efi_call
++ .quad efi_call_early
+ .byte 1
+ #endif /* CONFIG_EFI_STUB */
+
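Both EFI GDT hunks turn descriptor types 0x9a/0x92 into 0x9b/0x93, pre-setting the "accessed" bit (bit 40, the low bit of the 4-bit type field). The CPU otherwise sets that bit itself on first segment load, a hidden write that faults once the GDT is mapped read-only, which is presumably the motivation here given KERNEXEC's read-only kernel structures. Decoding the field from the patched __KERNEL_CS value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cs = 0x00af9b000000ffffULL;        /* patched __KERNEL_CS */
        unsigned type = (unsigned)(cs >> 40) & 0xf; /* 4-bit type field */

        /* type 0xb = code, execute/read, accessed; the original 0xa
         * lacked the accessed bit, inviting a CPU write into the GDT */
        printf("type=%#x accessed=%u\n", type, type & 1);
        return 0;
    }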
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index b3c5a5f0..596115e 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -176,13 +176,17 @@ static void handle_relocations(void *output, unsigned long output_len,
+ int *reloc;
+ unsigned long delta, map, ptr;
+ unsigned long min_addr = (unsigned long)output;
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ unsigned long max_addr = min_addr + (VO___bss_start - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR);
++#else
+ unsigned long max_addr = min_addr + (VO___bss_start - VO__text);
++#endif
+
+ /*
+ * Calculate the delta between where vmlinux was linked to load
+ * and where it was actually loaded.
+ */
+- delta = min_addr - LOAD_PHYSICAL_ADDR;
++ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
+
+ /*
+ * The kernel contains a table of relocation addresses. Those
+@@ -199,7 +203,7 @@ static void handle_relocations(void *output, unsigned long output_len,
+ * from __START_KERNEL_map.
+ */
+ if (IS_ENABLED(CONFIG_X86_64))
+- delta = virt_addr - LOAD_PHYSICAL_ADDR;
++ delta = virt_addr - ____LOAD_PHYSICAL_ADDR;
+
+ if (!delta) {
+ debug_putstr("No relocation needed... ");
+@@ -274,7 +278,7 @@ static void parse_elf(void *output)
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdrs, *phdr;
+ #endif
+- void *dest;
++ void *dest, *prev;
+ int i;
+
+ memcpy(&ehdr, output, sizeof(ehdr));
+@@ -301,11 +305,14 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+ memmove(dest, output + phdr->p_offset, phdr->p_filesz);
++ if (i)
++ memset(prev, 0xff, dest - prev);
++ prev = dest + phdr->p_filesz;
+ break;
+ default: /* Ignore other PT_* */ break;
+ }
+@@ -337,7 +344,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ unsigned char *output,
+ unsigned long output_len)
+ {
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++ const unsigned long kernel_total_size = VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR;
++#else
+ const unsigned long kernel_total_size = VO__end - VO__text;
++#endif
+ unsigned long virt_addr = (unsigned long)output;
+
+ /* Retain x86 boot parameters pointer passed from startup_32/64. */
+@@ -395,7 +406,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Destination address does not match LOAD_PHYSICAL_ADDR");
+ if ((unsigned long)output != virt_addr)
+ error("Destination virtual address changed when not relocatable");
+diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
+index 56589d0..f2085be 100644
+--- a/arch/x86/boot/compressed/pagetable.c
++++ b/arch/x86/boot/compressed/pagetable.c
+@@ -14,6 +14,7 @@
+ */
+ #define __pa(x) ((unsigned long)(x))
+ #define __va(x) ((void *)((unsigned long)(x)))
++#undef CONFIG_PAX_KERNEXEC
+
+ #include "misc.h"
+
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4ad7d70..c703963 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -126,9 +126,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
+@@ -141,9 +141,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_cpuflags();
+@@ -154,12 +154,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_cpuflags();
+ } else if (err == 0x01 &&
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 3dd5be3..16720a2 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -438,7 +438,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #
+ # Getting to provably safe in-place decompression is hard. Worst case
+@@ -543,7 +543,12 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_min_extract_offset)
+
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE (VO__end - VO__text)
++#endif
++
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ # define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index ba3e100..6501b8f 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 77780e3..86be0cb 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -100,7 +100,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
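The memory.c and video.c hunks flip loop counters from int to unsigned int, consistent with the size-overflow hardening elsewhere in the patch: signed overflow is undefined behaviour that compilers may assume away, while unsigned arithmetic wraps predictably and compares cleanly against unsigned lengths. For instance:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        unsigned int u = UINT_MAX;

        u += 1;                 /* defined: wraps to 0 */
        printf("%u\n", u);

        /* with 'int i = INT_MAX;', 'i + 1' is undefined, so a guard
         * like 'if (i + 1 > i)' may be optimized away entirely */
        return 0;
    }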
+diff --git a/arch/x86/crypto/aes-i586-asm_32.S b/arch/x86/crypto/aes-i586-asm_32.S
+index 2849dbc..d7ff39c 100644
+--- a/arch/x86/crypto/aes-i586-asm_32.S
++++ b/arch/x86/crypto/aes-i586-asm_32.S
+@@ -38,6 +38,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words)
+
+@@ -286,7 +287,7 @@ ENTRY(aes_enc_blk)
+ pop %ebx
+ mov %r0,(%ebp)
+ pop %ebp
+- ret
++ pax_ret aes_enc_blk
+ ENDPROC(aes_enc_blk)
+
+ // AES (Rijndael) Decryption Subroutine
+@@ -358,5 +359,5 @@ ENTRY(aes_dec_blk)
+ pop %ebx
+ mov %r0,(%ebp)
+ pop %ebp
+- ret
++ pax_ret aes_dec_blk
+ ENDPROC(aes_dec_blk)
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 9105655..cf81747 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -77,7 +79,7 @@
+ movl r6 ## E,4(r9); \
+ movl r7 ## E,8(r9); \
+ movl r8 ## E,12(r9); \
+- ret; \
++ pax_ret FUNC; \
+ ENDPROC(FUNC);
+
+ #define round(TAB,OFFSET,r1,r2,r3,r4,r5,r6,r7,r8,ra,rb,rc,rd) \
+diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+index a916c4a..7e7b7cf 100644
+--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
++++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+@@ -64,6 +64,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ #define CONCAT(a,b) a##b
+ #define VMOVDQ vmovdqu
+@@ -436,7 +437,7 @@ ddq_add_8:
+
+ /* main body of aes ctr load */
+
+-.macro do_aes_ctrmain key_len
++.macro do_aes_ctrmain func key_len
+ cmp $16, num_bytes
+ jb .Ldo_return2\key_len
+
+@@ -537,7 +538,7 @@ ddq_add_8:
+ /* return updated IV */
+ vpshufb xbyteswap, xcounter, xcounter
+ vmovdqu xcounter, (p_iv)
+- ret
++ pax_ret \func
+ .endm
+
+ /*
+@@ -549,7 +550,7 @@ ddq_add_8:
+ */
+ ENTRY(aes_ctr_enc_128_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_128
++ do_aes_ctrmain aes_ctr_enc_128_avx_by8 KEY_128
+
+ ENDPROC(aes_ctr_enc_128_avx_by8)
+
+@@ -562,7 +563,7 @@ ENDPROC(aes_ctr_enc_128_avx_by8)
+ */
+ ENTRY(aes_ctr_enc_192_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_192
++ do_aes_ctrmain aes_ctr_enc_192_avx_by8 KEY_192
+
+ ENDPROC(aes_ctr_enc_192_avx_by8)
+
+@@ -575,6 +576,6 @@ ENDPROC(aes_ctr_enc_192_avx_by8)
+ */
+ ENTRY(aes_ctr_enc_256_avx_by8)
+ /* call the aes main loop */
+- do_aes_ctrmain KEY_256
++ do_aes_ctrmain aes_ctr_enc_256_avx_by8 KEY_256
+
+ ENDPROC(aes_ctr_enc_256_avx_by8)
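The surrounding crypto assembly is converted to RAP-style control-flow protection: ret becomes pax_ret <func>, direct calls become pax_direct_call, ENTRY becomes RAP_ENTRY for C-callable functions, and %r12 is vacated in favour of %r15, apparently because the instrumentation reserves %r12 for itself. Conceptually, call and return sites are tagged with per-function type hashes that are verified before control transfers. A standalone C model of the idea (the real mechanism is a GCC plugin plus these asm macros, not this):

    #include <stdio.h>
    #include <stdint.h>

    #define FN_HASH 0x52415021ULL   /* arbitrary stand-in for a type hash */

    struct tagged_fn {
        uint64_t hash;              /* emitted adjacent to the function */
        int (*fn)(int);
    };

    static int double_it(int x) { return 2 * x; }

    static struct tagged_fn entry = { FN_HASH, double_it };

    /* an indirect call that verifies the target's tag first */
    static int checked_call(const struct tagged_fn *t, int arg)
    {
        if (t->hash != FN_HASH) {
            fprintf(stderr, "control-flow check failed\n");
            return -1;
        }
        return t->fn(arg);
    }

    int main(void)
    {
        printf("%d\n", checked_call(&entry, 21));
        return 0;
    }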
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 383a6f8..dc7f45d 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -32,6 +32,7 @@
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * The following macros are used to move an (un)aligned 16 byte value to/from
+@@ -218,7 +219,7 @@ enc: .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -228,8 +229,8 @@ enc: .octa 0x2
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+
+ _get_AAD_loop\num_initial_blocks\operation:
+@@ -238,17 +239,17 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -453,8 +454,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -462,15 +463,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+@@ -1280,8 +1281,8 @@ _esb_loop_\@:
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ *
+ *****************************************************************************/
+-ENTRY(aesni_gcm_dec)
+- push %r12
++RAP_ENTRY(aesni_gcm_dec)
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1291,8 +1292,8 @@ ENTRY(aesni_gcm_dec)
+ */
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13 # %xmm13 = HashKey
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13 # %xmm13 = HashKey
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1320,10 +1321,10 @@ ENTRY(aesni_gcm_dec)
+ movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
+ mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+- mov %r13, %r12
+- and $(3<<4), %r12
++ mov %r13, %r15
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_decrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_decrypt
+ je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1373,16 +1374,16 @@ _zero_cipher_left_decrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1411,9 +1412,9 @@ _less_than_8_bytes_left_decrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+- mov arg8, %r12 # %r13 = aadLen (number of bytes)
+- shl $3, %r12 # convert into number of bits
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = aadLen (number of bytes)
++ shl $3, %r15 # convert into number of bits
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1452,8 +1453,8 @@ _return_T_done_decrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
+- ret
++ pop %r15
++ pax_ret aesni_gcm_dec
+ ENDPROC(aesni_gcm_dec)
+
+
+@@ -1540,8 +1541,8 @@ ENDPROC(aesni_gcm_dec)
+ *
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+-ENTRY(aesni_gcm_enc)
+- push %r12
++RAP_ENTRY(aesni_gcm_enc)
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1551,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
+ #
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1576,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
+ movdqa %xmm13, HashKey(%rsp)
+ mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
+ and $-16, %r13
+- mov %r13, %r12
++ mov %r13, %r15
+
+ # Encrypt first few blocks
+
+- and $(3<<4), %r12
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_encrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_encrypt
+ je _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1635,14 +1636,14 @@ _zero_cipher_left_encrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1675,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+- mov arg8, %r12 # %r12 = addLen (number of bytes)
+- shl $3, %r12
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = aadLen (number of bytes)
++ shl $3, %r15
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1716,8 +1717,8 @@ _return_T_done_encrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
+- ret
++ pop %r15
++ pax_ret aesni_gcm_enc
+ ENDPROC(aesni_gcm_enc)
+
+ #endif
+@@ -1734,7 +1735,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
+- ret
++ pax_ret _key_expansion_128
+ ENDPROC(_key_expansion_128)
+ ENDPROC(_key_expansion_256a)
+
+@@ -1760,7 +1761,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(TKEYP)
+ add $0x20, TKEYP
+- ret
++ pax_ret _key_expansion_192a
+ ENDPROC(_key_expansion_192a)
+
+ .align 4
+@@ -1780,7 +1781,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
+- ret
++ pax_ret _key_expansion_192b
+ ENDPROC(_key_expansion_192b)
+
+ .align 4
+@@ -1793,7 +1794,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
+- ret
++ pax_ret _key_expansion_256b
+ ENDPROC(_key_expansion_256b)
+
+ /*
+@@ -1820,72 +1821,72 @@ ENTRY(aesni_set_key)
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1
+- call _key_expansion_256b
++ pax_direct_call _key_expansion_256b
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+- call _key_expansion_256a
++ pax_direct_call _key_expansion_256a
+ jmp .Ldec_key
+ .Lenc_key192:
+ movq 0x10(UKEYP), %xmm2 # other user key
+ AESKEYGENASSIST 0x1 %xmm2 %xmm1 # round 1
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x2 %xmm2 %xmm1 # round 2
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x4 %xmm2 %xmm1 # round 3
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x8 %xmm2 %xmm1 # round 4
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x10 %xmm2 %xmm1 # round 5
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x20 %xmm2 %xmm1 # round 6
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ AESKEYGENASSIST 0x40 %xmm2 %xmm1 # round 7
+- call _key_expansion_192a
++ pax_direct_call _key_expansion_192a
+ AESKEYGENASSIST 0x80 %xmm2 %xmm1 # round 8
+- call _key_expansion_192b
++ pax_direct_call _key_expansion_192b
+ jmp .Ldec_key
+ .Lenc_key128:
+ AESKEYGENASSIST 0x1 %xmm0 %xmm1 # round 1
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x2 %xmm0 %xmm1 # round 2
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x4 %xmm0 %xmm1 # round 3
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x8 %xmm0 %xmm1 # round 4
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x10 %xmm0 %xmm1 # round 5
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x20 %xmm0 %xmm1 # round 6
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x40 %xmm0 %xmm1 # round 7
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x80 %xmm0 %xmm1 # round 8
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x1b %xmm0 %xmm1 # round 9
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ AESKEYGENASSIST 0x36 %xmm0 %xmm1 # round 10
+- call _key_expansion_128
++ pax_direct_call _key_expansion_128
+ .Ldec_key:
+ sub $0x10, TKEYP
+ movaps (KEYP), %xmm0
+@@ -1908,13 +1909,13 @@ ENTRY(aesni_set_key)
+ popl KEYP
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_set_key
+ ENDPROC(aesni_set_key)
+
+ /*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+-ENTRY(aesni_enc)
++RAP_ENTRY(aesni_enc)
+ FRAME_BEGIN
+ #ifndef __x86_64__
+ pushl KEYP
+@@ -1925,14 +1926,14 @@ ENTRY(aesni_enc)
+ #endif
+ movl 480(KEYP), KLEN # key length
+ movups (INP), STATE # input
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE, (OUTP) # output
+ #ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_enc
+ ENDPROC(aesni_enc)
+
+ /*
+@@ -1990,7 +1991,7 @@ _aesni_enc1:
+ AESENC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESENCLAST KEY STATE
+- ret
++ pax_ret _aesni_enc1
+ ENDPROC(_aesni_enc1)
+
+ /*
+@@ -2099,13 +2100,13 @@ _aesni_enc4:
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
+- ret
++ pax_ret _aesni_enc4
+ ENDPROC(_aesni_enc4)
+
+ /*
+ * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ */
+-ENTRY(aesni_dec)
++RAP_ENTRY(aesni_dec)
+ FRAME_BEGIN
+ #ifndef __x86_64__
+ pushl KEYP
+@@ -2117,14 +2118,14 @@ ENTRY(aesni_dec)
+ mov 480(KEYP), KLEN # key length
+ add $240, KEYP
+ movups (INP), STATE # input
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ movups STATE, (OUTP) #output
+ #ifndef __x86_64__
+ popl KLEN
+ popl KEYP
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_dec
+ ENDPROC(aesni_dec)
+
+ /*
+@@ -2182,7 +2183,7 @@ _aesni_dec1:
+ AESDEC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESDECLAST KEY STATE
+- ret
++ pax_ret _aesni_dec1
+ ENDPROC(_aesni_dec1)
+
+ /*
+@@ -2291,7 +2292,7 @@ _aesni_dec4:
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
+- ret
++ pax_ret _aesni_dec4
+ ENDPROC(_aesni_dec4)
+
+ /*
+@@ -2322,7 +2323,7 @@ ENTRY(aesni_ecb_enc)
+ movups 0x10(INP), STATE2
+ movups 0x20(INP), STATE3
+ movups 0x30(INP), STATE4
+- call _aesni_enc4
++ pax_direct_call _aesni_enc4
+ movups STATE1, (OUTP)
+ movups STATE2, 0x10(OUTP)
+ movups STATE3, 0x20(OUTP)
+@@ -2337,7 +2338,7 @@ ENTRY(aesni_ecb_enc)
+ .align 4
+ .Lecb_enc_loop1:
+ movups (INP), STATE1
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE1, (OUTP)
+ sub $16, LEN
+ add $16, INP
+@@ -2351,7 +2352,7 @@ ENTRY(aesni_ecb_enc)
+ popl LEN
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_ecb_enc
+ ENDPROC(aesni_ecb_enc)
+
+ /*
+@@ -2383,7 +2384,7 @@ ENTRY(aesni_ecb_dec)
+ movups 0x10(INP), STATE2
+ movups 0x20(INP), STATE3
+ movups 0x30(INP), STATE4
+- call _aesni_dec4
++ pax_direct_call _aesni_dec4
+ movups STATE1, (OUTP)
+ movups STATE2, 0x10(OUTP)
+ movups STATE3, 0x20(OUTP)
+@@ -2398,7 +2399,7 @@ ENTRY(aesni_ecb_dec)
+ .align 4
+ .Lecb_dec_loop1:
+ movups (INP), STATE1
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ movups STATE1, (OUTP)
+ sub $16, LEN
+ add $16, INP
+@@ -2412,7 +2413,7 @@ ENTRY(aesni_ecb_dec)
+ popl LEN
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_ecb_dec
+ ENDPROC(aesni_ecb_dec)
+
+ /*
+@@ -2440,7 +2441,7 @@ ENTRY(aesni_cbc_enc)
+ .Lcbc_enc_loop:
+ movups (INP), IN # load input
+ pxor IN, STATE
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ movups STATE, (OUTP) # store output
+ sub $16, LEN
+ add $16, INP
+@@ -2456,7 +2457,7 @@ ENTRY(aesni_cbc_enc)
+ popl IVP
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_cbc_enc
+ ENDPROC(aesni_cbc_enc)
+
+ /*
+@@ -2500,7 +2501,7 @@ ENTRY(aesni_cbc_dec)
+ movups 0x30(INP), IN2
+ movaps IN2, STATE4
+ #endif
+- call _aesni_dec4
++ pax_direct_call _aesni_dec4
+ pxor IV, STATE1
+ #ifdef __x86_64__
+ pxor IN1, STATE2
+@@ -2530,7 +2531,7 @@ ENTRY(aesni_cbc_dec)
+ .Lcbc_dec_loop1:
+ movups (INP), IN
+ movaps IN, STATE
+- call _aesni_dec1
++ pax_direct_call _aesni_dec1
+ pxor IV, STATE
+ movups STATE, (OUTP)
+ movaps IN, IV
+@@ -2549,7 +2550,7 @@ ENTRY(aesni_cbc_dec)
+ popl IVP
+ #endif
+ FRAME_END
+- ret
++ pax_ret aesni_cbc_dec
+ ENDPROC(aesni_cbc_dec)
+
+ #ifdef __x86_64__
+@@ -2578,7 +2579,7 @@ _aesni_inc_init:
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
+- ret
++ pax_ret _aesni_inc_init
+ ENDPROC(_aesni_inc_init)
+
+ /*
+@@ -2607,37 +2608,37 @@ _aesni_inc:
+ .Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
+- ret
++ pax_ret _aesni_inc
+ ENDPROC(_aesni_inc)
+
+ /*
+ * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+ * size_t len, u8 *iv)
+ */
+-ENTRY(aesni_ctr_enc)
++RAP_ENTRY(aesni_ctr_enc)
+ FRAME_BEGIN
+ cmp $16, LEN
+ jb .Lctr_enc_just_ret
+ mov 480(KEYP), KLEN
+ movups (IVP), IV
+- call _aesni_inc_init
++ pax_direct_call _aesni_inc_init
+ cmp $64, LEN
+ jb .Lctr_enc_loop1
+ .align 4
+ .Lctr_enc_loop4:
+ movaps IV, STATE1
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups (INP), IN1
+ movaps IV, STATE2
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x10(INP), IN2
+ movaps IV, STATE3
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x20(INP), IN3
+ movaps IV, STATE4
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups 0x30(INP), IN4
+- call _aesni_enc4
++ pax_direct_call _aesni_enc4
+ pxor IN1, STATE1
+ movups STATE1, (OUTP)
+ pxor IN2, STATE2
+@@ -2656,9 +2657,9 @@ ENTRY(aesni_ctr_enc)
+ .align 4
+ .Lctr_enc_loop1:
+ movaps IV, STATE
+- call _aesni_inc
++ pax_direct_call _aesni_inc
+ movups (INP), IN
+- call _aesni_enc1
++ pax_direct_call _aesni_enc1
+ pxor IN, STATE
+ movups STATE, (OUTP)
+ sub $16, LEN
+@@ -2670,7 +2671,7 @@ ENTRY(aesni_ctr_enc)
+ movups IV, (IVP)
+ .Lctr_enc_just_ret:
+ FRAME_END
+- ret
++ pax_ret aesni_ctr_enc
+ ENDPROC(aesni_ctr_enc)
+
+ /*
+@@ -2734,7 +2735,7 @@ ENTRY(aesni_xts_crypt8)
+ pxor INC, STATE4
+ movdqu IV, 0x30(OUTP)
+
+- call *%r11
++ pax_indirect_call "%r11", _aesni_enc4
+
+ movdqu 0x00(OUTP), INC
+ pxor INC, STATE1
+@@ -2779,7 +2780,7 @@ ENTRY(aesni_xts_crypt8)
+ _aesni_gf128mul_x_ble()
+ movups IV, (IVP)
+
+- call *%r11
++ pax_indirect_call "%r11", _aesni_enc4
+
+ movdqu 0x40(OUTP), INC
+ pxor INC, STATE1
+@@ -2798,7 +2799,7 @@ ENTRY(aesni_xts_crypt8)
+ movdqu STATE4, 0x70(OUTP)
+
+ FRAME_END
+- ret
++ pax_ret aesni_xts_crypt8
+ ENDPROC(aesni_xts_crypt8)
+
+ #endif
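Note on the hunk above: the aesni-intel_asm.S changes follow one pattern — direct calls become pax_direct_call, returns become pax_ret tagged with the owning symbol, and the two indirect `call *%r11` sites in aesni_xts_crypt8 become pax_indirect_call "%r11", _aesni_enc4, naming a function of the type the register is expected to hold. As a rough C model of that last check only (every name and the hash constant below are invented; the real check is emitted inline around the call instruction by the RAP machinery, not written as C), an indirect call guarded by a type tag looks like:

/*
 * Toy analogue of pax_indirect_call: before jumping through a
 * pointer, verify the target carries the tag expected for its type.
 * Illustrative sketch only -- not part of the patch or the kernel.
 */
#include <stdint.h>
#include <stdio.h>

typedef void (*blk4_fn)(void *ctx, uint8_t *dst, const uint8_t *src);

#define TOY_HASH_BLK4 0x4ae5u   /* invented stand-in for a type hash */

struct toy_target {
	uint16_t hash;          /* conceptually stored next to the entry */
	blk4_fn fn;
};

static void toy_enc4(void *ctx, uint8_t *dst, const uint8_t *src)
{
	(void)ctx;
	for (int i = 0; i < 4; i++)
		dst[i] = src[i] ^ 0x5a; /* placeholder for 4-block AES */
}

static void toy_indirect_call(const struct toy_target *t, void *ctx,
			      uint8_t *dst, const uint8_t *src)
{
	if (t->hash != TOY_HASH_BLK4) { /* type mismatch: refuse to jump */
		fprintf(stderr, "toy RAP: bad target hash\n");
		return;
	}
	t->fn(ctx, dst, src);
}

int main(void)
{
	const uint8_t in[4] = { 1, 2, 3, 4 };
	uint8_t out[4];
	struct toy_target t = { TOY_HASH_BLK4, toy_enc4 };

	toy_indirect_call(&t, NULL, out, in);
	printf("%02x %02x %02x %02x\n", out[0], out[1], out[2], out[3]);
	return 0;
}

A mismatched tag aborts before control transfers, which mirrors the property the instrumentation is after.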
+diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
+index 522ab68..782ae42 100644
+--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
++++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
+@@ -121,6 +121,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ .data
+ .align 16
+@@ -1486,7 +1487,7 @@ ENTRY(aesni_gcm_precomp_avx_gen2)
+ pop %r14
+ pop %r13
+ pop %r12
+- ret
++ pax_ret aesni_gcm_precomp_avx_gen2
+ ENDPROC(aesni_gcm_precomp_avx_gen2)
+
+ ###############################################################################
+@@ -1507,7 +1508,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen2)
+ ###############################################################################
+ ENTRY(aesni_gcm_enc_avx_gen2)
+ GCM_ENC_DEC_AVX ENC
+- ret
++ pax_ret aesni_gcm_enc_avx_gen2
+ ENDPROC(aesni_gcm_enc_avx_gen2)
+
+ ###############################################################################
+@@ -1528,7 +1529,7 @@ ENDPROC(aesni_gcm_enc_avx_gen2)
+ ###############################################################################
+ ENTRY(aesni_gcm_dec_avx_gen2)
+ GCM_ENC_DEC_AVX DEC
+- ret
++ pax_ret aesni_gcm_dec_avx_gen2
+ ENDPROC(aesni_gcm_dec_avx_gen2)
+ #endif /* CONFIG_AS_AVX */
+
+@@ -2762,7 +2763,7 @@ ENTRY(aesni_gcm_precomp_avx_gen4)
+ pop %r14
+ pop %r13
+ pop %r12
+- ret
++ pax_ret aesni_gcm_precomp_avx_gen4
+ ENDPROC(aesni_gcm_precomp_avx_gen4)
+
+
+@@ -2784,7 +2785,7 @@ ENDPROC(aesni_gcm_precomp_avx_gen4)
+ ###############################################################################
+ ENTRY(aesni_gcm_enc_avx_gen4)
+ GCM_ENC_DEC_AVX2 ENC
+- ret
++ pax_ret aesni_gcm_enc_avx_gen4
+ ENDPROC(aesni_gcm_enc_avx_gen4)
+
+ ###############################################################################
+@@ -2805,7 +2806,7 @@ ENDPROC(aesni_gcm_enc_avx_gen4)
+ ###############################################################################
+ ENTRY(aesni_gcm_dec_avx_gen4)
+ GCM_ENC_DEC_AVX2 DEC
+- ret
++ pax_ret aesni_gcm_dec_avx_gen4
+ ENDPROC(aesni_gcm_dec_avx_gen4)
+
+ #endif /* CONFIG_AS_AVX2 */
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index aa8b067..f9da224 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -71,9 +71,9 @@ struct aesni_xts_ctx {
+
+ asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
++asmlinkage void aesni_enc(void *ctx, u8 *out,
+ const u8 *in);
+-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
++asmlinkage void aesni_dec(void *ctx, u8 *out,
+ const u8 *in);
+ asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len);
+@@ -83,6 +83,15 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
++int _key_expansion_128(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_192a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_192b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_256a(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++int _key_expansion_256b(struct crypto_aes_ctx *ctx, const u8 *in_key, unsigned int key_len) __rap_hash;
++void _aesni_enc1(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_enc4(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_dec1(void *ctx, u8 *out, const u8 *in) __rap_hash;
++void _aesni_dec4(void *ctx, u8 *out, const u8 *in) __rap_hash;
+
+ int crypto_fpu_init(void);
+ void crypto_fpu_exit(void);
+@@ -96,6 +105,8 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+ asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
++void _aesni_inc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash;
++void _aesni_inc_init(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) __rap_hash;
+
+ asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, bool enc, u8 *iv);
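Note on the hunk above: the aesni-intel_glue.c change gives the file-local assembly helpers (_key_expansion_*, _aesni_enc1/_aesni_enc4, _aesni_dec1/_aesni_dec4, _aesni_inc and _aesni_inc_init) explicit C prototypes marked __rap_hash, so each label now named by a pax_ret or pax_direct_call has a signature from which a type tag can be derived. The toy below only illustrates that idea; the hash function and the signature strings are invented here, and the real tag is computed by the compiler plugin from type information, not from strings:

/* Invented stand-in for a signature-derived tag (FNV-1a, 32-bit). */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_type_hash(const char *sig)
{
	uint32_t h = 2166136261u;       /* FNV-1a offset basis */

	while (*sig) {
		h ^= (unsigned char)*sig++;
		h *= 16777619u;         /* FNV prime */
	}
	return h;
}

int main(void)
{
	/* same prototype on both sides of a call: tags agree */
	uint32_t caller = toy_type_hash("void(void *, u8 *, const u8 *)");
	uint32_t callee = toy_type_hash("void(void *, u8 *, const u8 *)");
	/* different prototype: tags disagree, transfer would be refused */
	uint32_t other  = toy_type_hash("int(struct crypto_aes_ctx *, const u8 *, unsigned int)");

	printf("caller=%08x callee=%08x other=%08x\n", caller, callee, other);
	return (caller == callee && caller != other) ? 0 : 1;
}

The point is only that identical prototypes yield identical tags, so a call or return checks out exactly when caller and callee agree on the type.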
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 246c670..d4e1aa5 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "blowfish-x86_64-asm.S"
+ .text
+@@ -149,13 +150,13 @@ ENTRY(__blowfish_enc_blk)
+ jnz .L__enc_xor;
+
+ write_block();
+- ret;
++ pax_ret __blowfish_enc_blk;
+ .L__enc_xor:
+ xor_block();
+- ret;
++ pax_ret __blowfish_enc_blk;
+ ENDPROC(__blowfish_enc_blk)
+
+-ENTRY(blowfish_dec_blk)
++RAP_ENTRY(blowfish_dec_blk)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -183,7 +184,7 @@ ENTRY(blowfish_dec_blk)
+
+ movq %r11, %rbp;
+
+- ret;
++ pax_ret blowfish_dec_blk;
+ ENDPROC(blowfish_dec_blk)
+
+ /**********************************************************************
+@@ -334,17 +335,17 @@ ENTRY(__blowfish_enc_blk_4way)
+
+ popq %rbx;
+ popq %rbp;
+- ret;
++ pax_ret __blowfish_enc_blk_4way;
+
+ .L__enc_xor4:
+ xor_block4();
+
+ popq %rbx;
+ popq %rbp;
+- ret;
++ pax_ret __blowfish_enc_blk_4way;
+ ENDPROC(__blowfish_enc_blk_4way)
+
+-ENTRY(blowfish_dec_blk_4way)
++RAP_ENTRY(blowfish_dec_blk_4way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst
+@@ -375,5 +376,5 @@ ENTRY(blowfish_dec_blk_4way)
+ popq %rbx;
+ popq %rbp;
+
+- ret;
++ pax_ret blowfish_dec_blk_4way;
+ ENDPROC(blowfish_dec_blk_4way)
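Note on the hunks above: across blowfish-x86_64-asm_64.S and the other .S files the conversion is mechanical — every `ret` becomes `pax_ret <symbol>` (including both exits of __blowfish_enc_blk) and every `call` becomes a pax_direct_call or pax_indirect_call. A site missed while forward-porting a hunk would silently reintroduce an unchecked return, so a crude audit pass helps. The checker below is only a sketch (it is not part of this patch or the kernel tree) that flags bare ret/call mnemonics left in a converted file:

/*
 * Sketch: flag bare "ret"/"call" lines in a .S file that the
 * pax_ret / pax_direct_call conversion may have missed.
 * Heuristic line matcher, not an assembler parser.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

int main(int argc, char **argv)
{
	char line[1024];
	int lineno = 0;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s file.S\n", argv[0]);
		return 2;
	}
	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		return 2;
	}
	while (fgets(line, sizeof(line), f)) {
		const char *p = line;

		lineno++;
		while (isspace((unsigned char)*p))
			p++;
		/* "ret"/"retq" followed by end of line, ';' or space */
		if (!strncmp(p, "ret", 3) &&
		    (p[3] == '\0' || p[3] == '\n' || p[3] == ';' ||
		     p[3] == 'q' || isspace((unsigned char)p[3])))
			printf("%d: %s", lineno, line);
		/* "call" with an operand, i.e. not pax_direct_call etc. */
		else if (!strncmp(p, "call", 4) &&
			 isspace((unsigned char)p[4]))
			printf("%d: %s", lineno, line);
	}
	fclose(f);
	return 0;
}

Run against a converted file (for example arch/x86/crypto/blowfish-x86_64-asm_64.S after applying the patch); lines it prints are candidates for a missed conversion, not proof of one.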
+diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+index aa9e8bd..7e68f75 100644
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -17,6 +17,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -192,7 +193,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+ %rcx, (%r9));
+- ret;
++ pax_ret roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd;
+ ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+
+ .align 8
+@@ -200,7 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
+ %rax, (%r9));
+- ret;
++ pax_ret roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab;
+ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+
+ /*
+@@ -212,7 +213,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
+ leaq (key_table + (i) * 8)(CTX), %r9; \
+- call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
++ pax_direct_call roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
+ \
+ vmovdqu x4, 0 * 16(mem_cd); \
+ vmovdqu x5, 1 * 16(mem_cd); \
+@@ -224,7 +225,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ vmovdqu x3, 7 * 16(mem_cd); \
+ \
+ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
+- call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
++ pax_direct_call roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
+ \
+ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
+
+@@ -783,7 +784,7 @@ __camellia_enc_blk16:
+ %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
+
+ FRAME_END
+- ret;
++ pax_ret camellia_xts_enc_16way;
+
+ .align 8
+ .Lenc_max32:
+@@ -870,7 +871,7 @@ __camellia_dec_blk16:
+ %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
+
+ FRAME_END
+- ret;
++ pax_ret camellia_xts_dec_16way;
+
+ .align 8
+ .Ldec_max32:
+@@ -889,7 +890,7 @@ __camellia_dec_blk16:
+ jmp .Ldec_max24;
+ ENDPROC(__camellia_dec_blk16)
+
+-ENTRY(camellia_ecb_enc_16way)
++RAP_ENTRY(camellia_ecb_enc_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+@@ -904,17 +905,17 @@ ENTRY(camellia_ecb_enc_16way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_enc_blk16;
++ pax_direct_call __camellia_enc_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ FRAME_END
+- ret;
++ pax_ret camellia_ecb_enc_16way;
+ ENDPROC(camellia_ecb_enc_16way)
+
+-ENTRY(camellia_ecb_dec_16way)
++RAP_ENTRY(camellia_ecb_dec_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+@@ -934,17 +935,17 @@ ENTRY(camellia_ecb_dec_16way)
+ /* now dst can be used as temporary buffer (even in src == dst case) */
+ movq %rsi, %rax;
+
+- call __camellia_dec_blk16;
++ pax_direct_call __camellia_dec_blk16;
+
+ write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0,
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
+ FRAME_END
+- ret;
++ pax_ret camellia_ecb_dec_16way;
+ ENDPROC(camellia_ecb_dec_16way)
+
+-ENTRY(camellia_cbc_dec_16way)
++RAP_ENTRY(camellia_cbc_dec_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+@@ -968,7 +969,7 @@ ENTRY(camellia_cbc_dec_16way)
+ subq $(16 * 16), %rsp;
+ movq %rsp, %rax;
+
+- call __camellia_dec_blk16;
++ pax_direct_call __camellia_dec_blk16;
+
+ addq $(16 * 16), %rsp;
+
+@@ -992,7 +993,7 @@ ENTRY(camellia_cbc_dec_16way)
+ %xmm8, %rsi);
+
+ FRAME_END
+- ret;
++ pax_ret camellia_cbc_dec_16way;
+ ENDPROC(camellia_cbc_dec_16way)
+
+ #define inc_le128(x, minus_one, tmp) \
+@@ -1001,7 +1002,7 @@ ENDPROC(camellia_cbc_dec_16way)
+ vpslldq $8, tmp, tmp; \
+ vpsubq tmp, x, x;
+
+-ENTRY(camellia_ctr_16way)
++RAP_ENTRY(camellia_ctr_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+@@ -1080,7 +1081,7 @@ ENTRY(camellia_ctr_16way)
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+- call __camellia_enc_blk16;
++ pax_direct_call __camellia_enc_blk16;
+
+ addq $(16 * 16), %rsp;
+
+@@ -1105,7 +1106,7 @@ ENTRY(camellia_ctr_16way)
+ %xmm8, %rsi);
+
+ FRAME_END
+- ret;
++ pax_ret camellia_ctr_16way;
+ ENDPROC(camellia_ctr_16way)
+
+ #define gf128mul_x_ble(iv, mask, tmp) \
+@@ -1224,7 +1225,7 @@ camellia_xts_crypt_16way:
+ vpxor 14 * 16(%rax), %xmm15, %xmm14;
+ vpxor 15 * 16(%rax), %xmm15, %xmm15;
+
+- call *%r9;
++ pax_indirect_call "%r9", camellia_xts_enc_16way;
+
+ addq $(16 * 16), %rsp;
+
+@@ -1249,10 +1250,10 @@ camellia_xts_crypt_16way:
+ %xmm8, %rsi);
+
+ FRAME_END
+- ret;
++ pax_ret camellia_xts_crypt_16way;
+ ENDPROC(camellia_xts_crypt_16way)
+
+-ENTRY(camellia_xts_enc_16way)
++RAP_ENTRY(camellia_xts_enc_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+@@ -1266,7 +1267,7 @@ ENTRY(camellia_xts_enc_16way)
+ jmp camellia_xts_crypt_16way;
+ ENDPROC(camellia_xts_enc_16way)
+
+-ENTRY(camellia_xts_dec_16way)
++RAP_ENTRY(camellia_xts_dec_16way)
+ /* input:
+ * %rdi: ctx, CTX
+ * %rsi: dst (16 blocks)
+diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+index 16186c1..a751452 100644
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -12,6 +12,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/frame.h>
++#include <asm/alternative-asm.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -231,7 +232,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
+ %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
+ %rcx, (%r9));
+- ret;
++ pax_ret roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd;
+ ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+
+ .align 8
+@@ -239,7 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
+ %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
+ %rax, (%r9));
+- ret;
++ pax_ret roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab;
+ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+
+ /*
+@@ -251,7 +252,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
+ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
+ leaq (key_table + (i) * 8)(CTX), %r9; \
+- call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
++ pax_direct_call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
+ \
+ vmovdqu x0, 4 * 32(mem_cd); \
+ vmovdqu x1, 5 * 32(mem_cd); \
+@@ -263,7 +264,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+ vmovdqu x7, 3 * 32(mem_cd); \
+ \
+ leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
+- call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
++ pax_direct_call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
+ \
+ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);
+
+@@ -823,7 +824,7 @@ __camellia_enc_blk32:
+ %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
+
+ FRAME_END
+- ret;
++ pax_ret __camellia_enc_blk32;
+
+ .align 8
+ .Lenc_max32:
+@@ -910,7 +911,7 @@ __camellia_dec_blk32:
+ %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
+
+ FRAME_END
+- ret;
++ pax_ret __camellia