author | 2015-09-20 14:42:47 -0400
---|---
committer | 2015-09-20 14:42:47 -0400
commit | cf9115c0ed2ec392f3fffea6566d8e1f10e502f8 (patch)
tree | e0ebd15d944754273ec6cd7f4f966ace3fac0117
parent | grsecurity-3.1-4.1.7-201509131604 (diff)
download | hardened-patchset-cf9115c0ed2ec392f3fffea6566d8e1f10e502f8.tar.gz hardened-patchset-cf9115c0ed2ec392f3fffea6566d8e1f10e502f8.tar.bz2 hardened-patchset-cf9115c0ed2ec392f3fffea6566d8e1f10e502f8.zip
EOL: 3.2 and 3.14 series.
73 files changed, 0 insertions, 464588 deletions
diff --git a/3.14.51/0000_README b/3.14.51/0000_README
deleted file mode 100644
index 430d8cd..0000000
--- a/3.14.51/0000_README
+++ /dev/null
@@ -1,48 +0,0 @@
-README
------------------------------------------------------------------------------
-Individual Patch Descriptions:
------------------------------------------------------------------------------
-Patch: 1050_linux-3.14.51.patch
-From: http://www.kernel.org
-Desc: Linux 3.14.51
-
-Patch: 4420_grsecurity-3.1-3.14.51-201508181951.patch
-From: http://www.grsecurity.net
-Desc: hardened-sources base patch from upstream grsecurity
-
-Patch: 4425_grsec_remove_EI_PAX.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Remove EI_PAX option and force off
-
-Patch: 4430_grsec-remove-localversion-grsec.patch
-From: Kerin Millar <kerframil@gmail.com>
-Desc: Removes grsecurity's localversion-grsec file
-
-Patch: 4435_grsec-mute-warnings.patch
-From: Alexander Gabert <gaberta@fh-trier.de>
- Gordon Malm <gengor@gentoo.org>
-Desc: Removes verbose compile warning settings from grsecurity, restores
- mainline Linux kernel behavior
-
-Patch: 4440_grsec-remove-protected-paths.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Removes chmod statements from grsecurity/Makefile
-
-Patch: 4450_grsec-kconfig-default-gids.patch
-From: Kerin Millar <kerframil@gmail.com>
-Desc: Sets sane(r) default GIDs on various grsecurity group-dependent
- features
-
-Patch: 4465_selinux-avc_audit-log-curr_ip.patch
-From: Gordon Malm <gengor@gentoo.org>
- Anthony G. Basile <blueness@gentoo.org>
-Desc: Configurable option to add src IP address to SELinux log messages
-
-Patch: 4470_disable-compat_vdso.patch
-From: Gordon Malm <gengor@gentoo.org>
- Kerin Millar <kerframil@gmail.com>
-Desc: Disables VDSO_COMPAT operation completely
-
-Patch: 4475_emutramp_default_on.patch
-From: Anthony G. Basile <blueness@gentoo.org>
-Desc: Set PAX_EMUTRAMP default on for libffi, bugs #329499 and #457194
diff --git a/3.14.51/1050_linux-3.14.51.patch b/3.14.51/1050_linux-3.14.51.patch
deleted file mode 100644
index 8c28a74..0000000
--- a/3.14.51/1050_linux-3.14.51.patch
+++ /dev/null
@@ -1,1929 +0,0 @@
-diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
-index 4c3efe4..750ab97 100644
---- a/Documentation/ABI/testing/ima_policy
-+++ b/Documentation/ABI/testing/ima_policy
-@@ -20,16 +20,18 @@ Description:
- action: measure | dont_measure | appraise | dont_appraise | audit
- condition:= base | lsm [option]
- base: [[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
-- [fowner]]
-+ [euid=] [fowner=]]
- lsm: [[subj_user=] [subj_role=] [subj_type=]
- [obj_user=] [obj_role=] [obj_type=]]
- option: [[appraise_type=]] [permit_directio]
-
- base: func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
-- mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
-+ mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
-+ [[^]MAY_EXEC]
- fsmagic:= hex value
- fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
- uid:= decimal value
-+ euid:= decimal value
- fowner:=decimal value
- lsm: are LSM specific
- option: appraise_type:= [imasig]
-diff --git a/Makefile b/Makefile
-index d71c40a..83275d8e 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1,6 +1,6 @@
- VERSION = 3
- PATCHLEVEL = 14
--SUBLEVEL = 50
-+SUBLEVEL = 51
- EXTRAVERSION =
- NAME = Remembering Coco
-
-diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
-index 22a3b9b..4157aec 100644
---- a/arch/arm/include/asm/smp.h
-+++ b/arch/arm/include/asm/smp.h
-@@ -74,6 +74,7 @@ struct secondary_data {
- };
- extern struct secondary_data secondary_data;
- extern volatile int pen_release;
-+extern void secondary_startup(void);
-
- extern int __cpu_disable(void);
-
-diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
-index 4551efd..399af1e 100644
---- a/arch/arm/mach-omap2/omap_hwmod.c
-+++ b/arch/arm/mach-omap2/omap_hwmod.c
-@@ -2452,6 +2452,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
- * registers. This address is needed early so the OCP registers that
- * are part of the device's address space can be ioremapped properly.
- *
-+ * If SYSC access is not needed, the registers will not be remapped
-+ * and non-availability of MPU access is not treated as an error.
-+ *
- * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
- * -ENXIO on absent or invalid register target address space.
- */
-@@ -2466,6 +2469,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
-
- _save_mpu_port_index(oh);
-
-+ /* if we don't need sysc access we don't need to ioremap */
-+ if (!oh->class->sysc)
-+ return 0;
-+
-+ /* we can't continue without MPU PORT if we need sysc access */
- if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
- return -ENXIO;
-
-@@ -2475,8 +2483,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
- oh->name);
-
- /* Extract the IO space from device tree blob */
-- if (!np)
-+ if (!np) {
-+ pr_err("omap_hwmod: %s: no dt node\n", oh->name);
- return -ENXIO;
-+ }
-
- va_start = of_iomap(np, index + oh->mpu_rt_idx);
- } else {
-@@ -2535,13 +2545,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
- oh->name, np->name);
- }
-
-- if (oh->class->sysc) {
-- r = _init_mpu_rt_base(oh, NULL, index, np);
-- if (r < 0) {
-- WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-- oh->name);
-- return 0;
-- }
-+ r = _init_mpu_rt_base(oh, NULL, index, np);
-+ if (r < 0) {
-+ WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
-+ oh->name);
-+ return 0;
-+ }
- }
-
- r = _init_clocks(oh, NULL);
-diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
-index 2022e09..db09170 100644
---- a/arch/arm/mach-realview/include/mach/memory.h
-+++ b/arch/arm/mach-realview/include/mach/memory.h
-@@ -56,6 +56,8 @@
- #define PAGE_OFFSET1 (PAGE_OFFSET + 0x10000000)
- #define PAGE_OFFSET2 (PAGE_OFFSET + 0x30000000)
-
-+#define PHYS_OFFSET PLAT_PHYS_OFFSET
-+
- #define __phys_to_virt(phys) \
- ((phys) >= 0x80000000 ? (phys) - 0x80000000 + PAGE_OFFSET2 : \
- (phys) >= 0x20000000 ? (phys) - 0x20000000 + PAGE_OFFSET1 : \
-diff --git a/arch/arm/mach-sunxi/Makefile b/arch/arm/mach-sunxi/Makefile
-index d939720..27b168f 100644
---- a/arch/arm/mach-sunxi/Makefile
-+++ b/arch/arm/mach-sunxi/Makefile
-@@ -1,2 +1,2 @@
- obj-$(CONFIG_ARCH_SUNXI) += sunxi.o
--obj-$(CONFIG_SMP) += platsmp.o headsmp.o
-+obj-$(CONFIG_SMP) += platsmp.o
-diff --git a/arch/arm/mach-sunxi/headsmp.S b/arch/arm/mach-sunxi/headsmp.S
-deleted file mode 100644
-index a10d494..0000000
---- a/arch/arm/mach-sunxi/headsmp.S
-+++ /dev/null
-@@ -1,9 +0,0 @@
--#include <linux/linkage.h>
--#include <linux/init.h>
--
-- .section ".text.head", "ax"
--
--ENTRY(sun6i_secondary_startup)
-- msr cpsr_fsxc, #0xd3
-- b secondary_startup
--ENDPROC(sun6i_secondary_startup)
-diff --git a/arch/arm/mach-sunxi/platsmp.c b/arch/arm/mach-sunxi/platsmp.c
-index 7b141d8..0c7dbce 100644
---- a/arch/arm/mach-sunxi/platsmp.c
-+++ b/arch/arm/mach-sunxi/platsmp.c
-@@ -82,7 +82,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu,
- spin_lock(&cpu_lock);
-
- /* Set CPU boot address */
-- writel(virt_to_phys(sun6i_secondary_startup),
-+ writel(virt_to_phys(secondary_startup),
- cpucfg_membase + CPUCFG_PRIVATE0_REG);
-
- /* Assert the CPU core in reset */
-diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
-index 7ed72dc..a966bac 100644
---- a/arch/arm64/kernel/signal32.c
-+++ b/arch/arm64/kernel/signal32.c
-@@ -165,7 +165,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
- * Other callers might not initialize the si_lsb field,
- * so check explicitely for the right codes here.
- */
-- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
-+ if (from->si_signo == SIGBUS &&
-+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
- err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
- #endif
- break;
-@@ -192,8 +193,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-
- int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- {
-- memset(to, 0, sizeof *to);
--
- if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE))
-diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
-index 008324d..b154953 100644
---- a/arch/mips/include/asm/pgtable.h
-+++ b/arch/mips/include/asm/pgtable.h
-@@ -150,8 +150,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
- * Make sure the buddy is global too (if it's !none,
- * it better already be global)
- */
-+#ifdef CONFIG_SMP
-+ /*
-+ * For SMP, multiple CPUs can race, so we need to do
-+ * this atomically.
-+ */
-+#ifdef CONFIG_64BIT
-+#define LL_INSN "lld"
-+#define SC_INSN "scd"
-+#else /* CONFIG_32BIT */
-+#define LL_INSN "ll"
-+#define SC_INSN "sc"
-+#endif
-+ unsigned long page_global = _PAGE_GLOBAL;
-+ unsigned long tmp;
-+
-+ __asm__ __volatile__ (
-+ " .set push\n"
-+ " .set noreorder\n"
-+ "1: " LL_INSN " %[tmp], %[buddy]\n"
-+ " bnez %[tmp], 2f\n"
-+ " or %[tmp], %[tmp], %[global]\n"
-+ " " SC_INSN " %[tmp], %[buddy]\n"
-+ " beqz %[tmp], 1b\n"
-+ " nop\n"
-+ "2:\n"
-+ " .set pop"
-+ : [buddy] "+m" (buddy->pte),
-+ [tmp] "=&r" (tmp)
-+ : [global] "r" (page_global));
-+#else /* !CONFIG_SMP */
- if (pte_none(*buddy))
- pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
-+#endif /* CONFIG_SMP */
- }
- #endif
- }
-diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
-index cb09862..ca16964 100644
---- a/arch/mips/kernel/mips-mt-fpaff.c
-+++ b/arch/mips/kernel/mips-mt-fpaff.c
-@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- unsigned long __user *user_mask_ptr)
- {
- unsigned int real_len;
-- cpumask_t mask;
-+ cpumask_t allowed, mask;
- int retval;
- struct task_struct *p;
-
-@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
- if (retval)
- goto out_unlock;
-
-- cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
-+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
-+ cpumask_and(&mask, &allowed, cpu_active_mask);
-
- out_unlock:
- read_unlock(&tasklist_lock);
-diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
-index 3d60f77..ea585cf 100644
---- a/arch/mips/kernel/signal32.c
-+++ b/arch/mips/kernel/signal32.c
-@@ -370,8 +370,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-
- int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
- {
-- memset(to, 0, sizeof *to);
--
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
-diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
-index 3190099..d4ab447 100644
---- a/arch/mips/mti-malta/malta-time.c
-+++ b/arch/mips/mti-malta/malta-time.c
-@@ -168,14 +168,17 @@ unsigned int get_c0_compare_int(void)
-
- static void __init init_rtc(void)
- {
-- /* stop the clock whilst setting it up */
-- CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
-+ unsigned char freq, ctrl;
-
-- /* 32KHz time base */
-- CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
-+ /* Set 32KHz time base if not already set */
-+ freq = CMOS_READ(RTC_FREQ_SELECT);
-+ if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
-+ CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
-
-- /* start the clock */
-- CMOS_WRITE(RTC_24H, RTC_CONTROL);
-+ /* Ensure SET bit is clear so RTC can run */
-+ ctrl = CMOS_READ(RTC_CONTROL);
-+ if (ctrl & RTC_SET)
-+ CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
- }
-
- void __init plat_time_init(void)
-diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
-index 4e47db6..e881e3f 100644
---- a/arch/powerpc/kernel/signal_32.c
-+++ b/arch/powerpc/kernel/signal_32.c
-@@ -967,8 +967,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
-
- int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
- {
-- memset(to, 0, sizeof *to);
--
- if (copy_from_user(to, from, 3*sizeof(int)) ||
- copy_from_user(to->_sifields._pad,
- from->_sifields._pad, SI_PAD_SIZE32))
-diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
-index 11fdf0e..50d6f16 100644
---- a/arch/sparc/include/asm/visasm.h
-+++ b/arch/sparc/include/asm/visasm.h
-@@ -28,16 +28,10 @@
- * Must preserve %o5 between VISEntryHalf and VISExitHalf */
-
- #define VISEntryHalf \
-- rd %fprs, %o5; \
-- andcc %o5, FPRS_FEF, %g0; \
-- be,pt %icc, 297f; \
-- sethi %hi(298f), %g7; \
-- sethi %hi(VISenterhalf), %g1; \
-- jmpl %g1 + %lo(VISenterhalf), %g0; \
-- or %g7, %lo(298f), %g7; \
-- clr %o5; \
--297: wr %o5, FPRS_FEF, %fprs; \
--298:
-+ VISEntry
-+
-+#define VISExitHalf \
-+ VISExit
-
- #define VISEntryHalfFast(fail_label) \
- rd %fprs, %o5; \
-@@ -47,7 +41,7 @@
- ba,a,pt %xcc, fail_label; \
- 297: wr %o5, FPRS_FEF, %fprs;
-
--#define VISExitHalf \
-+#define VISExitHalfFast \
- wr %o5, 0, %fprs;
-
- #ifndef __ASSEMBLY__
-diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
-index 140527a..83aeeb1 100644
---- a/arch/sparc/lib/NG4memcpy.S
-+++ b/arch/sparc/lib/NG4memcpy.S
-@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
- add %o0, 0x40, %o0
- bne,pt %icc, 1b
- LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
-+#ifdef NON_USER_COPY
-+ VISExitHalfFast
-+#else
- VISExitHalf
--
-+#endif
- brz,pn %o2, .Lexit
- cmp %o2, 19
- ble,pn %icc, .Lsmall_unaligned
-diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
-index b320ae9..a063d84 100644
---- a/arch/sparc/lib/VISsave.S
-+++ b/arch/sparc/lib/VISsave.S
-@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
-
- stx %g3, [%g6 + TI_GSR]
- 2: add %g6, %g1, %g3
-- cmp %o5, FPRS_DU
-- be,pn %icc, 6f
-- sll %g1, 3, %g1
-+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
-+ sll %g1, 3, %g1
- stb %o5, [%g3 + TI_FPSAVED]
- rd %gsr, %g2
- add %g6, %g1, %g3
-@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
- .align 32
- 80: jmpl %g7 + %g0, %g0
- nop
--
--6: ldub [%g3 + TI_FPSAVED], %o5
-- or %o5, FPRS_DU, %o5
-- add %g6, TI_FPREGS+0x80, %g2
-- stb %o5, [%g3 + TI_FPSAVED]
--
-- sll %g1, 5, %g1
-- add %g6, TI_FPREGS+0xc0, %g3
-- wr %g0, FPRS_FEF, %fprs
-- membar #Sync
-- stda %f32, [%g2 + %g1] ASI_BLK_P
-- stda %f48, [%g3 + %g1] ASI_BLK_P
-- membar #Sync
-- ba,pt %xcc, 80f
-- nop
--
-- .align 32
--80: jmpl %g7 + %g0, %g0
-- nop
--
-- .align 32
--VISenterhalf:
-- ldub [%g6 + TI_FPDEPTH], %g1
-- brnz,a,pn %g1, 1f
-- cmp %g1, 1
-- stb %g0, [%g6 + TI_FPSAVED]
-- stx %fsr, [%g6 + TI_XFSR]
-- clr %o5
-- jmpl %g7 + %g0, %g0
-- wr %g0, FPRS_FEF, %fprs
--
--1: bne,pn %icc, 2f
-- srl %g1, 1, %g1
-- ba,pt %xcc, vis1
-- sub %g7, 8, %g7
--2: addcc %g6, %g1, %g3
-- sll %g1, 3, %g1
-- andn %o5, FPRS_DU, %g2
-- stb %g2, [%g3 + TI_FPSAVED]
--
-- rd %gsr, %g2
-- add %g6, %g1, %g3
-- stx %g2, [%g3 + TI_GSR]
-- add %g6, %g1, %g2
-- stx %fsr, [%g2 + TI_XFSR]
-- sll %g1, 5, %g1
--3: andcc %o5, FPRS_DL, %g0
-- be,pn %icc, 4f
-- add %g6, TI_FPREGS, %g2
--
-- add %g6, TI_FPREGS+0x40, %g3
-- membar #Sync
-- stda %f0, [%g2 + %g1] ASI_BLK_P
-- stda %f16, [%g3 + %g1] ASI_BLK_P
-- membar #Sync
-- ba,pt %xcc, 4f
-- nop
--
-- .align 32
--4: and %o5, FPRS_DU, %o5
-- jmpl %g7 + %g0, %g0
-- wr %o5, FPRS_FEF, %fprs
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index 323335b..ac094de 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -126,10 +126,6 @@ EXPORT_SYMBOL(copy_user_page);
- void VISenter(void);
- EXPORT_SYMBOL(VISenter);
-
--/* CRYPTO code needs this */
--void VISenterhalf(void);
--EXPORT_SYMBOL(VISenterhalf);
--
- extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
- extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
-index 6a11845..7205173 100644
---- a/arch/x86/kvm/lapic.h
-+++ b/arch/x86/kvm/lapic.h
-@@ -165,7 +165,7 @@ static inline u16 apic_logical_id(struct kvm_apic_map *map, u32 ldr)
-
- static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
- {
-- return vcpu->arch.apic->pending_events;
-+ return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
- }
-
- bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
-diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
-index 201d09a..2302f10 100644
---- a/arch/x86/xen/enlighten.c
-+++ b/arch/x86/xen/enlighten.c
-@@ -481,6 +481,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- pte_t pte;
- unsigned long pfn;
- struct page *page;
-+ unsigned char dummy;
-
- ptep = lookup_address((unsigned long)v, &level);
- BUG_ON(ptep == NULL);
-@@ -490,6 +491,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
-
- pte = pfn_pte(pfn, prot);
-
-+ /*
-+ * Careful: update_va_mapping() will fail if the virtual address
-+ * we're poking isn't populated in the page tables. We don't
-+ * need to worry about the direct map (that's always in the page
-+ * tables), but we need to be careful about vmap space. In
-+ * particular, the top level page table can lazily propagate
-+ * entries between processes, so if we've switched mms since we
-+ * vmapped the target in the first place, we might not have the
-+ * top-level page table entry populated.
-+ *
-+ * We disable preemption because we want the same mm active when
-+ * we probe the target and when we issue the hypercall. We'll
-+ * have the same nominal mm, but if we're a kernel thread, lazy
-+ * mm dropping could change our pgd.
-+ *
-+ * Out of an abundance of caution, this uses __get_user() to fault
-+ * in the target address just in case there's some obscure case
-+ * in which the target address isn't readable.
-+ */
-+
-+ preempt_disable();
-+
-+ pagefault_disable(); /* Avoid warnings due to being atomic. */
-+ __get_user(dummy, (unsigned char __user __force *)v);
-+ pagefault_enable();
-+
- if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
- BUG();
-
-@@ -501,6 +528,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
- BUG();
- } else
- kmap_flush_unused();
-+
-+ preempt_enable();
- }
-
- static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
-@@ -508,6 +537,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
- const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
- int i;
-
-+ /*
-+ * We need to mark the all aliases of the LDT pages RO. We
-+ * don't need to call vm_flush_aliases(), though, since that's
-+ * only responsible for flushing aliases out the TLBs, not the
-+ * page tables, and Xen will flush the TLB for us if needed.
-+ *
-+ * To avoid confusing future readers: none of this is necessary
-+ * to load the LDT. The hypervisor only checks this when the
-+ * LDT is faulted in due to subsequent descriptor access.
-+ */
-+
- for(i = 0; i < entries; i += entries_per_page)
- set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
- }
-diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
-index 12be7cb..b583773 100644
---- a/drivers/block/rbd.c
-+++ b/drivers/block/rbd.c
-@@ -508,6 +508,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
- # define rbd_assert(expr) ((void) 0)
- #endif /* !RBD_DEBUG */
-
-+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
- static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
- static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
- static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
-@@ -1651,6 +1652,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
- obj_request_done_set(obj_request);
- }
-
-+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
-+{
-+ dout("%s: obj %p\n", __func__, obj_request);
-+
-+ if (obj_request_img_data_test(obj_request))
-+ rbd_osd_copyup_callback(obj_request);
-+ else
-+ obj_request_done_set(obj_request);
-+}
-+
- static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- struct ceph_msg *msg)
- {
-@@ -1689,6 +1700,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- rbd_osd_stat_callback(obj_request);
- break;
- case CEPH_OSD_OP_CALL:
-+ rbd_osd_call_callback(obj_request);
-+ break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- rbd_osd_trivial_callback(obj_request);
-@@ -2275,13 +2288,15 @@ out_unwind:
- }
-
- static void
--rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
-+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
- {
- struct rbd_img_request *img_request;
- struct rbd_device *rbd_dev;
- struct page **pages;
- u32 page_count;
-
-+ dout("%s: obj %p\n", __func__, obj_request);
-+
- rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
- rbd_assert(obj_request_img_data_test(obj_request));
- img_request = obj_request->img_request;
-@@ -2307,9 +2322,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
- if (!obj_request->result)
- obj_request->xferred = obj_request->length;
-
-- /* Finish up with the normal image object callback */
--
-- rbd_img_obj_callback(obj_request);
-+ obj_request_done_set(obj_request);
- }
-
- static void
-@@ -2406,7 +2419,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
-
- /* All set, send it off. */
-
-- orig_request->callback = rbd_img_obj_copyup_callback;
- osdc = &rbd_dev->rbd_client->client->osdc;
- img_result = rbd_obj_request_submit(osdc, orig_request);
- if (!img_result)
-diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
-index f757a0f..3beed38 100644
---- a/drivers/crypto/ixp4xx_crypto.c
-+++ b/drivers/crypto/ixp4xx_crypto.c
-@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
- crypt->mode |= NPE_OP_NOT_IN_PLACE;
- /* This was never tested by Intel
- * for more than one dst buffer, I think. */
-- BUG_ON(req->dst->length < nbytes);
- req_ctx->dst = NULL;
- if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
- flags, DMA_FROM_DEVICE))
-diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
-index 6651177..79a2669 100644
---- a/drivers/gpu/drm/radeon/radeon_combios.c
-+++ b/drivers/gpu/drm/radeon/radeon_combios.c
-@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
-
- if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
- (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
-+ u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
-+
-+ if (hss > lvds->native_mode.hdisplay)
-+ hss = (10 - 1) * 8;
-+
- lvds->native_mode.htotal = lvds->native_mode.hdisplay +
- (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
- lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
-- (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
-+ hss;
- lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
- (RBIOS8(tmp + 23) * 8);
-
-diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
-index 8e51b3a..cc3dc0c 100644
---- a/drivers/md/bitmap.c
-+++ b/drivers/md/bitmap.c
-@@ -564,6 +564,8 @@ static int bitmap_read_sb(struct bitmap *bitmap)
- if (err)
- return err;
-
-+ err = -EINVAL;
-+
- sb = kmap_atomic(sb_page);
-
- chunksize = le32_to_cpu(sb->chunksize);
-diff --git a/drivers/md/md.c b/drivers/md/md.c
-index b4067b9..2ffd277 100644
---- a/drivers/md/md.c
-+++ b/drivers/md/md.c
-@@ -5645,8 +5645,7 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
- char *ptr, *buf = NULL;
- int err = -ENOMEM;
-
-- file = kmalloc(sizeof(*file), GFP_NOIO);
--
-+ file = kzalloc(sizeof(*file), GFP_NOIO);
- if (!file)
- goto out;
-
-diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
-index 9be97e0..47b7c31 100644
---- a/drivers/md/raid1.c
-+++ b/drivers/md/raid1.c
-@@ -1477,6 +1477,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
- {
- char b[BDEVNAME_SIZE];
- struct r1conf *conf = mddev->private;
-+ unsigned long flags;
-
- /*
- * If it is not operational, then we have already marked it as dead
-@@ -1496,14 +1497,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
- return;
- }
- set_bit(Blocked, &rdev->flags);
-+ spin_lock_irqsave(&conf->device_lock, flags);
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
-- unsigned long flags;
-- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded++;
- set_bit(Faulty, &rdev->flags);
-- spin_unlock_irqrestore(&conf->device_lock, flags);
- } else
- set_bit(Faulty, &rdev->flags);
-+ spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery is running, make sure it aborts.
- */
-@@ -1569,7 +1569,10 @@ static int raid1_spare_active(struct mddev *mddev)
- * Find all failed disks within the RAID1 configuration
- * and mark them readable.
- * Called under mddev lock, so rcu protection not needed.
-+ * device_lock used to avoid races with raid1_end_read_request
-+ * which expects 'In_sync' flags and ->degraded to be consistent.
- */
-+ spin_lock_irqsave(&conf->device_lock, flags);
- for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = conf->mirrors[i].rdev;
- struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
-@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
- sysfs_notify_dirent_safe(rdev->sysfs_state);
- }
- }
-- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
-diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
-index b4ddb73..128dc2f 100644
---- a/drivers/scsi/ipr.c
-+++ b/drivers/scsi/ipr.c
-@@ -592,9 +592,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
- {
- struct ipr_trace_entry *trace_entry;
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-+ unsigned int trace_index;
-
-- trace_entry = &ioa_cfg->trace[atomic_add_return
-- (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
-+ trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
-+ trace_entry = &ioa_cfg->trace[trace_index];
- trace_entry->time = jiffies;
- trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
- trace_entry->type = type;
-@@ -1044,10 +1045,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
-
- static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
- {
-+ unsigned int hrrq;
-+
- if (ioa_cfg->hrrq_num == 1)
-- return 0;
-- else
-- return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
-+ hrrq = 0;
-+ else {
-+ hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
-+ hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
-+ }
-+ return hrrq;
- }
-
- /**
-@@ -6179,21 +6185,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
- u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-- unsigned long hrrq_flags;
-+ unsigned long lock_flags;
-
- scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
-
- if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
- scsi_dma_unmap(scsi_cmd);
-
-- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
-+ spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
- scsi_cmd->scsi_done(scsi_cmd);
-- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
-+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
- } else {
-- spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
-+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-+ spin_lock(&ipr_cmd->hrrq->_lock);
- ipr_erp_start(ioa_cfg, ipr_cmd);
-- spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
-+ spin_unlock(&ipr_cmd->hrrq->_lock);
-+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- }
- }
-
-diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
-index 02edae7..694ec20 100644
---- a/drivers/scsi/ipr.h
-+++ b/drivers/scsi/ipr.h
-@@ -1459,6 +1459,7 @@ struct ipr_ioa_cfg {
-
- #define IPR_NUM_TRACE_INDEX_BITS 8
- #define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
-+#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
- #define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
- char trace_start[8];
- #define IPR_TRACE_START_LABEL "trace"
-diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
-index eb81c98..721d839 100644
---- a/drivers/scsi/sg.c
-+++ b/drivers/scsi/sg.c
-@@ -1694,6 +1694,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
- md->from_user = 0;
- }
-
-+ if (unlikely(iov_count > UIO_MAXIOV))
-+ return -EINVAL;
-+
- if (iov_count) {
- int len, size = sizeof(struct sg_iovec) * iov_count;
- struct iovec *iov;
-diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
-index 55ec9b4..9dbf176 100644
---- a/drivers/target/iscsi/iscsi_target.c
-+++ b/drivers/target/iscsi/iscsi_target.c
-@@ -3937,7 +3937,13 @@ get_immediate:
- }
-
- transport_err:
-- iscsit_take_action_for_connection_exit(conn);
-+ /*
-+ * Avoid the normal connection failure code-path if this connection
-+ * is still within LOGIN mode, and iscsi_np process context is
-+ * responsible for cleaning up the early connection failure.
-+ */
-+ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
-+ iscsit_take_action_for_connection_exit(conn);
- out:
- return 0;
- }
-@@ -4023,7 +4029,7 @@ reject:
-
- int iscsi_target_rx_thread(void *arg)
- {
-- int ret;
-+ int ret, rc;
- u8 buffer[ISCSI_HDR_LEN], opcode;
- u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = arg;
-@@ -4033,10 +4039,16 @@ int iscsi_target_rx_thread(void *arg)
- * connection recovery / failure event can be triggered externally.
- */
- allow_signal(SIGINT);
-+ /*
-+ * Wait for iscsi_post_login_handler() to complete before allowing
-+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
-+ */
-+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
-+ if (rc < 0)
-+ return 0;
-
- if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
- struct completion comp;
-- int rc;
-
- init_completion(&comp);
- rc = wait_for_completion_interruptible(&comp);
-diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
-index 825b579..92abbe2 100644
---- a/drivers/target/iscsi/iscsi_target_core.h
-+++ b/drivers/target/iscsi/iscsi_target_core.h
-@@ -604,6 +604,7 @@ struct iscsi_conn {
- int bitmap_id;
- int rx_thread_active;
- struct task_struct *rx_thread;
-+ struct completion rx_login_comp;
- int tx_thread_active;
- struct task_struct *tx_thread;
- /* list_head for session connection list */
-diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
-index 449df09..01c27aa 100644
---- a/drivers/target/iscsi/iscsi_target_login.c
-+++ b/drivers/target/iscsi/iscsi_target_login.c
-@@ -83,6 +83,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
- init_completion(&conn->conn_logout_comp);
- init_completion(&conn->rx_half_close_comp);
- init_completion(&conn->tx_half_close_comp);
-+ init_completion(&conn->rx_login_comp);
- spin_lock_init(&conn->cmd_lock);
- spin_lock_init(&conn->conn_usage_lock);
- spin_lock_init(&conn->immed_queue_lock);
-@@ -716,6 +717,7 @@ int iscsit_start_kthreads(struct iscsi_conn *conn)
-
- return 0;
- out_tx:
-+ send_sig(SIGINT, conn->tx_thread, 1);
- kthread_stop(conn->tx_thread);
- conn->tx_thread_active = false;
- out_bitmap:
-@@ -726,7 +728,7 @@ out_bitmap:
- return ret;
- }
-
--int iscsi_post_login_handler(
-+void iscsi_post_login_handler(
- struct iscsi_np *np,
- struct iscsi_conn *conn,
- u8 zero_tsih)
-@@ -736,7 +738,6 @@ int iscsi_post_login_handler(
- struct se_session *se_sess = sess->se_sess;
- struct iscsi_portal_group *tpg = sess->tpg;
- struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
-- int rc;
-
- iscsit_inc_conn_usage_count(conn);
-
-@@ -777,10 +778,6 @@ int iscsi_post_login_handler(
- sess->sess_ops->InitiatorName);
- spin_unlock_bh(&sess->conn_lock);
-
-- rc = iscsit_start_kthreads(conn);
-- if (rc)
-- return rc;
--
- iscsi_post_login_start_timers(conn);
- /*
- * Determine CPU mask to ensure connection's RX and TX kthreads
-@@ -789,15 +786,20 @@
- iscsit_thread_get_cpumask(conn);
- conn->conn_rx_reset_cpumask = 1;
- conn->conn_tx_reset_cpumask = 1;
--
-+ /*
-+ * Wakeup the sleeping iscsi_target_rx_thread() now that
-+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
-+ */
-+ complete(&conn->rx_login_comp);
- iscsit_dec_conn_usage_count(conn);
-+
- if (stop_timer) {
- spin_lock_bh(&se_tpg->session_lock);
- iscsit_stop_time2retain_timer(sess);
- spin_unlock_bh(&se_tpg->session_lock);
- }
- iscsit_dec_session_usage_count(sess);
-- return 0;
-+ return;
- }
-
- iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
-@@ -838,10 +840,6 @@
- " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
- spin_unlock_bh(&se_tpg->session_lock);
-
-- rc = iscsit_start_kthreads(conn);
-- if (rc)
-- return rc;
--
- iscsi_post_login_start_timers(conn);
- /*
- * Determine CPU mask to ensure connection's RX and TX kthreads
-@@ -850,10 +848,12 @@
- iscsit_thread_get_cpumask(conn);
- conn->conn_rx_reset_cpumask = 1;
- conn->conn_tx_reset_cpumask = 1;
--
-+ /*
-+ * Wakeup the sleeping iscsi_target_rx_thread() now that
-+ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
-+ */
-+ complete(&conn->rx_login_comp);
- iscsit_dec_conn_usage_count(conn);
--
-- return 0;
- }
-
- static void iscsi_handle_login_thread_timeout(unsigned long data)
-@@ -1418,23 +1418,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
- if (ret < 0)
- goto new_sess_out;
-
-- if (!conn->sess) {
-- pr_err("struct iscsi_conn session pointer is NULL!\n");
-- goto new_sess_out;
-- }
--
- iscsi_stop_login_thread_timer(np);
-
-- if (signal_pending(current))
-- goto new_sess_out;
--
- if (ret == 1) {
- tpg_np = conn->tpg_np;
-
-- ret = iscsi_post_login_handler(np, conn, zero_tsih);
-- if (ret < 0)
-- goto new_sess_out;
--
-+ iscsi_post_login_handler(np, conn, zero_tsih);
- iscsit_deaccess_np(np, tpg, tpg_np);
- }
-
-diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
-index 29d0983..55cbf45 100644
---- a/drivers/target/iscsi/iscsi_target_login.h
-+++ b/drivers/target/iscsi/iscsi_target_login.h
-@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
- extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
- extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
- extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
--extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
-+extern int iscsit_start_kthreads(struct iscsi_conn *);
-+extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
- extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
- bool, bool);
- extern int iscsi_target_login_thread(void *);
-diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
-index 582ba84..25ad113 100644
---- a/drivers/target/iscsi/iscsi_target_nego.c
-+++ b/drivers/target/iscsi/iscsi_target_nego.c
-@@ -17,6 +17,7 @@
- ******************************************************************************/
-
- #include <linux/ctype.h>
-+#include <linux/kthread.h>
- #include <scsi/iscsi_proto.h>
- #include <target/target_core_base.h>
- #include <target/target_core_fabric.h>
-@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
- ntohl(login_rsp->statsn), login->rsp_length);
-
- padding = ((-login->rsp_length) & 3);
-+ /*
-+ * Before sending the last login response containing the transition
-+ * bit for full-feature-phase, go ahead and start up TX/RX threads
-+ * now to avoid potential resource allocation failures after the
-+ * final login response has been sent.
-+ */
-+ if (login->login_complete) {
-+ int rc = iscsit_start_kthreads(conn);
-+ if (rc) {
-+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-+ ISCSI_LOGIN_STATUS_NO_RESOURCES);
-+ return -1;
-+ }
-+ }
-
- if (conn->conn_transport->iscsit_put_login_tx(conn, login,
- login->rsp_length + padding) < 0)
-- return -1;
-+ goto err;
-
- login->rsp_length = 0;
- mutex_lock(&sess->cmdsn_mutex);
-@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
- mutex_unlock(&sess->cmdsn_mutex);
-
- return 0;
-+
-+err:
-+ if (login->login_complete) {
-+ if (conn->rx_thread && conn->rx_thread_active) {
-+ send_sig(SIGINT, conn->rx_thread, 1);
-+ kthread_stop(conn->rx_thread);
-+ }
-+ if (conn->tx_thread && conn->tx_thread_active) {
-+ send_sig(SIGINT, conn->tx_thread, 1);
-+ kthread_stop(conn->tx_thread);
-+ }
-+ spin_lock(&iscsit_global->ts_bitmap_lock);
-+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
-+ get_order(1));
-+ spin_unlock(&iscsit_global->ts_bitmap_lock);
-+ }
-+ return -1;
- }
-
- static void iscsi_target_sk_data_ready(struct sock *sk, int count)
-diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
-index bcc43a2..a365e97 100644
---- a/drivers/usb/host/xhci-ring.c
-+++ b/drivers/usb/host/xhci-ring.c
-@@ -86,7 +86,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
- return 0;
- /* offset in TRBs */
- segment_offset = trb - seg->trbs;
-- if (segment_offset > TRBS_PER_SEGMENT)
-+ if (segment_offset >= TRBS_PER_SEGMENT)
- return 0;
- return seg->dma + (segment_offset * sizeof(*trb));
- }
-diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
-index 74a9375..89c55d4 100644
---- a/drivers/usb/serial/sierra.c
-+++ b/drivers/usb/serial/sierra.c
-@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
- .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
- },
-+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
- /* AT&T Direct IP LTE modems */
- { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
- .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
-diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
-index 073b4a1..ff3c98f 100644
---- a/drivers/xen/gntdev.c
-+++ b/drivers/xen/gntdev.c
-@@ -529,12 +529,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
-
- pr_debug("priv %p\n", priv);
-
-+ mutex_lock(&priv->lock);
- while (!list_empty(&priv->maps)) {
- map = list_entry(priv->maps.next, struct grant_map, next);
- list_del(&map->next);
- gntdev_put_map(NULL /* already removed */, map);
- }
- WARN_ON(!list_empty(&priv->freeable_maps));
-+ mutex_unlock(&priv->lock);
-
- if (use_ptemod)
- mmu_notifier_unregister(&priv->mn, priv->mm);
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 3d2f27b..df323f8 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -244,17 +244,8 @@ static void __d_free(struct rcu_head *head)
- kmem_cache_free(dentry_cache, dentry);
- }
-
--/*
-- * no locks, please.
-- */
--static void d_free(struct dentry *dentry)
-+static void dentry_free(struct dentry *dentry)
- {
-- WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
-- BUG_ON((int)dentry->d_lockref.count > 0);
-- this_cpu_dec(nr_dentry);
-- if (dentry->d_op && dentry->d_op->d_release)
-- dentry->d_op->d_release(dentry);
--
- /* if dentry was never visible to RCU, immediate free is OK */
- if (!(dentry->d_flags & DCACHE_RCUACCESS))
- __d_free(&dentry->d_u.d_rcu);
-@@ -402,56 +393,6 @@ static void dentry_lru_add(struct dentry *dentry)
- d_lru_add(dentry);
- }
-
--/*
-- * Remove a dentry with references from the LRU.
-- *
-- * If we are on the shrink list, then we can get to try_prune_one_dentry() and
-- * lose our last reference through the parent walk. In this case, we need to
-- * remove ourselves from the shrink list, not the LRU.
-- */
--static void dentry_lru_del(struct dentry *dentry)
--{
-- if (dentry->d_flags & DCACHE_LRU_LIST) {
-- if (dentry->d_flags & DCACHE_SHRINK_LIST)
-- return d_shrink_del(dentry);
-- d_lru_del(dentry);
-- }
--}
--
--/**
-- * d_kill - kill dentry and return parent
-- * @dentry: dentry to kill
-- * @parent: parent dentry
-- *
-- * The dentry must already be unhashed and removed from the LRU.
-- *
-- * If this is the root of the dentry tree, return NULL.
-- *
-- * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
-- * d_kill.
-- */
--static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
-- __releases(dentry->d_lock)
-- __releases(parent->d_lock)
-- __releases(dentry->d_inode->i_lock)
--{
-- __list_del_entry(&dentry->d_child);
-- /*
-- * Inform d_walk() that we are no longer attached to the
-- * dentry tree
-- */
-- dentry->d_flags |= DCACHE_DENTRY_KILLED;
-- if (parent)
-- spin_unlock(&parent->d_lock);
-- dentry_iput(dentry);
-- /*
-- * dentry_iput drops the locks, at which point nobody (except
-- * transient RCU lookups) can reach this dentry.
-- */
-- d_free(dentry);
-- return parent;
--}
--
- /**
- * d_drop - drop a dentry
- * @dentry: dentry to drop
-@@ -509,7 +450,14 @@ dentry_kill(struct dentry *dentry, int unlock_on_failure)
- __releases(dentry->d_lock)
- {
- struct inode *inode;
-- struct dentry *parent;
-+ struct dentry *parent = NULL;
-+ bool can_free = true;
-+
-+ if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-+ can_free = dentry->d_flags & DCACHE_MAY_FREE;
-+ spin_unlock(&dentry->d_lock);
-+ goto out;
-+ }
-
- inode = dentry->d_inode;
- if (inode && !spin_trylock(&inode->i_lock)) {
-@@ -520,9 +468,7 @@ relock:
- }
- return dentry; /* try again with same dentry */
- }
-- if (IS_ROOT(dentry))
-- parent = NULL;
-- else
-+ if (!IS_ROOT(dentry))
- parent = dentry->d_parent;
- if (parent && !spin_trylock(&parent->d_lock)) {
- if (inode)
-@@ -542,10 +488,40 @@ relock:
- if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
- dentry->d_op->d_prune(dentry);
-
-- dentry_lru_del(dentry);
-+ if (dentry->d_flags & DCACHE_LRU_LIST) {
-+ if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
-+ d_lru_del(dentry);
-+ }
- /* if it was on the hash then remove it */
- __d_drop(dentry);
-- return d_kill(dentry, parent);
-+ __list_del_entry(&dentry->d_child);
-+ /*
-+ * Inform d_walk() that we are no longer attached to the
-+ * dentry tree
-+ */
-+ dentry->d_flags |= DCACHE_DENTRY_KILLED;
-+ if (parent)
-+ spin_unlock(&parent->d_lock);
-+ dentry_iput(dentry);
-+ /*
-+ * dentry_iput drops the locks, at which point nobody (except
-+ * transient RCU lookups) can reach this dentry.
-+ */
-+ BUG_ON((int)dentry->d_lockref.count > 0);
-+ this_cpu_dec(nr_dentry);
-+ if (dentry->d_op && dentry->d_op->d_release)
-+ dentry->d_op->d_release(dentry);
-+
-+ spin_lock(&dentry->d_lock);
-+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
-+ dentry->d_flags |= DCACHE_MAY_FREE;
-+ can_free = false;
-+ }
-+ spin_unlock(&dentry->d_lock);
-+out:
-+ if (likely(can_free))
-+ dentry_free(dentry);
-+ return parent;
- }
-
- /*
-@@ -817,65 +793,13 @@ restart:
- }
- EXPORT_SYMBOL(d_prune_aliases);
-
--/*
-- * Try to throw away a dentry - free the inode, dput the parent.
-- * Requires dentry->d_lock is held, and dentry->d_count == 0.
-- * Releases dentry->d_lock.
-- *
-- * This may fail if locks cannot be acquired no problem, just try again.
-- */
--static struct dentry * try_prune_one_dentry(struct dentry *dentry)
-- __releases(dentry->d_lock)
--{
-- struct dentry *parent;
--
-- parent = dentry_kill(dentry, 0);
-- /*
-- * If dentry_kill returns NULL, we have nothing more to do.
-- * if it returns the same dentry, trylocks failed. In either
-- * case, just loop again.
-- *
-- * Otherwise, we need to prune ancestors too. This is necessary
-- * to prevent quadratic behavior of shrink_dcache_parent(), but
-- * is also expected to be beneficial in reducing dentry cache
-- * fragmentation.
-- */
-- if (!parent)
-- return NULL;
-- if (parent == dentry)
-- return dentry;
--
-- /* Prune ancestors. */
-- dentry = parent;
-- while (dentry) {
-- if (lockref_put_or_lock(&dentry->d_lockref))
-- return NULL;
-- dentry = dentry_kill(dentry, 1);
-- }
-- return NULL;
--}
--
- static void shrink_dentry_list(struct list_head *list)
- {
-- struct dentry *dentry;
-+ struct dentry *dentry, *parent;
-
-- rcu_read_lock();
-- for (;;) {
-- dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
-- if (&dentry->d_lru == list)
-- break; /* empty */
--
-- /*
-- * Get the dentry lock, and re-verify that the dentry is
-- * this on the shrinking list. If it is, we know that
-- * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
-- */
-+ while (!list_empty(list)) {
-+ dentry = list_entry(list->prev, struct dentry, d_lru);
- spin_lock(&dentry->d_lock);
-- if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
-- spin_unlock(&dentry->d_lock);
-- continue;
-- }
--
- /*
- * The dispose list is isolated and dentries are not accounted
- * to the LRU here, so we can simply remove it from the list
-@@ -887,30 +811,38 @@ static void shrink_dentry_list(struct list_head *list)
- * We found an inuse dentry which was not removed from
- * the LRU because of laziness during lookup. Do not free it.
- */
-- if (dentry->d_lockref.count) {
-+ if ((int)dentry->d_lockref.count > 0) {
- spin_unlock(&dentry->d_lock);
- continue;
- }
-- rcu_read_unlock();
-
-+ parent = dentry_kill(dentry, 0);
- /*
-- * If 'try_to_prune()' returns a dentry, it will
-- * be the same one we passed in, and d_lock will
-- * have been held the whole time, so it will not
-- * have been added to any other lists. We failed
-- * to get the inode lock.
-- *
-- * We just add it back to the shrink list.
-+ * If dentry_kill returns NULL, we have nothing more to do.
- */
-- dentry = try_prune_one_dentry(dentry);
-+ if (!parent)
-+ continue;
-
-- rcu_read_lock();
-- if (dentry) {
-+ if (unlikely(parent == dentry)) {
-+ /*
-+ * trylocks have failed and d_lock has been held the
-+ * whole time, so it could not have been added to any
-+ * other lists. Just add it back to the shrink list.
-+ */
- d_shrink_add(dentry, list);
- spin_unlock(&dentry->d_lock);
-+ continue;
- }
-+ /*
-+ * We need to prune ancestors too. This is necessary to prevent
-+ * quadratic behavior of shrink_dcache_parent(), but is also
-+ * expected to be beneficial in reducing dentry cache
-+ * fragmentation.
-+ */
-+ dentry = parent;
-+ while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-+ dentry = dentry_kill(dentry, 1);
- }
-- rcu_read_unlock();
- }
-
- static enum lru_status
-@@ -1264,34 +1196,23 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
- if (data->start == dentry)
- goto out;
-
-- /*
-- * move only zero ref count dentries to the dispose list.
-- *
-- * Those which are presently on the shrink list, being processed
-- * by shrink_dentry_list(), shouldn't be moved. Otherwise the
-- * loop in shrink_dcache_parent() might not make any progress
-- * and loop forever.
-- */
-- if (dentry->d_lockref.count) {
-- dentry_lru_del(dentry);
-- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-- /*
-- * We can't use d_lru_shrink_move() because we
-- * need to get the global LRU lock and do the
-- * LRU accounting.
-- */
-- d_lru_del(dentry);
-- d_shrink_add(dentry, &data->dispose);
-+ if (dentry->d_flags & DCACHE_SHRINK_LIST) {
- data->found++;
-- ret = D_WALK_NORETRY;
-+ } else {
-+ if (dentry->d_flags & DCACHE_LRU_LIST)
-+ d_lru_del(dentry);
-+ if (!dentry->d_lockref.count) {
-+ d_shrink_add(dentry, &data->dispose);
-+ data->found++;
-+ }
- }
- /*
- * We can return to the caller if we have found some (this
- * ensures forward progress). We'll be coming back to find
- * the rest.
- */
-- if (data->found && need_resched())
-- ret = D_WALK_QUIT;
-+ if (!list_empty(&data->dispose))
-+ ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
- out:
- return ret;
- }
-@@ -1321,45 +1242,35 @@ void shrink_dcache_parent(struct dentry *parent)
- }
- EXPORT_SYMBOL(shrink_dcache_parent);
-
--static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
-+static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
- {
-- struct select_data *data = _data;
-- enum d_walk_ret ret = D_WALK_CONTINUE;
-+ /* it has busy descendents; complain about those instead */
-+ if (!list_empty(&dentry->d_subdirs))
-+ return D_WALK_CONTINUE;
-
-- if (dentry->d_lockref.count) {
-- dentry_lru_del(dentry);
-- if (likely(!list_empty(&dentry->d_subdirs)))
-- goto out;
-- if (dentry == data->start && dentry->d_lockref.count == 1)
-- goto out;
-- printk(KERN_ERR
-- "BUG: Dentry %p{i=%lx,n=%s}"
-- " still in use (%d)"
-- " [unmount of %s %s]\n",
-+ /* root with refcount 1 is fine */
-+ if (dentry == _data && dentry->d_lockref.count == 1)
-+ return D_WALK_CONTINUE;
-+
-+ printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
-+ " still in use (%d) [unmount of %s %s]\n",
- dentry,
- dentry->d_inode ?
- dentry->d_inode->i_ino : 0UL,
-- dentry->d_name.name,
-+ dentry,
- dentry->d_lockref.count,
- dentry->d_sb->s_type->name,
- dentry->d_sb->s_id);
-- BUG();
-- } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-- /*
-- * We can't use d_lru_shrink_move() because we
-- * need to get the global LRU lock and do the
-- * LRU accounting.
-- */
-- if (dentry->d_flags & DCACHE_LRU_LIST)
-- d_lru_del(dentry);
-- d_shrink_add(dentry, &data->dispose);
-- data->found++;
-- ret = D_WALK_NORETRY;
-- }
--out:
-- if (data->found && need_resched())
-- ret = D_WALK_QUIT;
-- return ret;
-+ WARN_ON(1);
-+ return D_WALK_CONTINUE;
-+}
-+
-+static void do_one_tree(struct dentry *dentry)
-+{
-+ shrink_dcache_parent(dentry);
-+ d_walk(dentry, dentry, umount_check, NULL);
-+ d_drop(dentry);
-+ dput(dentry);
- }
-
- /*
-@@ -1369,40 +1280,15 @@ void shrink_dcache_for_umount(struct super_block *sb)
- {
- struct dentry *dentry;
-
-- if (down_read_trylock(&sb->s_umount))
-- BUG();
-+ WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
-
- dentry = sb->s_root;
- sb->s_root = NULL;
-- for (;;) {
-- struct select_data data;
--
-- INIT_LIST_HEAD(&data.dispose);
-- data.start = dentry;
-- data.found = 0;
--
-- d_walk(dentry, &data, umount_collect, NULL);
-- if (!data.found)
-- break;
--
-- shrink_dentry_list(&data.dispose);
-- cond_resched();
-- }
-- d_drop(dentry);
-- dput(dentry);
-+ do_one_tree(dentry);
-
- while (!hlist_bl_empty(&sb->s_anon)) {
-- struct select_data data;
-- dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
--
-- INIT_LIST_HEAD(&data.dispose);
-- data.start = NULL;
-- data.found = 0;
--
-- d_walk(dentry, &data, umount_collect, NULL);
-- if (data.found)
-- shrink_dentry_list(&data.dispose);
-- cond_resched();
-+ dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
-+ do_one_tree(dentry);
- }
- }
-
-diff --git a/fs/namei.c b/fs/namei.c
-index ccb8000..c6fa079 100644
---- a/fs/namei.c
-+++ b/fs/namei.c
-@@ -3171,7 +3171,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
-
- if (unlikely(file->f_flags & __O_TMPFILE)) {
- error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
-- goto out;
-+ goto out2;
- }
-
- error = path_init(dfd, pathname->name, flags | LOOKUP_PARENT, nd, &base);
-@@ -3209,6 +3209,7 @@ out:
- path_put(&nd->root);
- if (base)
- fput(base);
-+out2:
- if (!(opened & FILE_OPENED)) {
- BUG_ON(!error);
- put_filp(file);
-diff --git a/fs/notify/mark.c b/fs/notify/mark.c
-index 923fe4a..6bffc33 100644
---- a/fs/notify/mark.c
-+++ b/fs/notify/mark.c
-@@ -293,16 +293,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
- unsigned int flags)
- {
- struct fsnotify_mark *lmark, *mark;
-+ LIST_HEAD(to_free);
-
-+ /*
-+ * We have to be really careful here. Anytime we drop mark_mutex, e.g.
-+ * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
-+ * to_free list so we have to use mark_mutex even when accessing that
-+ * list. And freeing mark requires us to drop mark_mutex. So we can
-+ * reliably free only the first mark in the list. That's why we first
-+ * move marks to free to to_free list in one go and then free marks in
-+ * to_free list one by one.
-+ */
- mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
- list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-- if (mark->flags & flags) {
-- fsnotify_get_mark(mark);
-- fsnotify_destroy_mark_locked(mark, group);
-- fsnotify_put_mark(mark);
-- }
-+ if (mark->flags & flags)
-+ list_move(&mark->g_list, &to_free);
- }
- mutex_unlock(&group->mark_mutex);
-+
-+ while (1) {
-+ mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
-+ if (list_empty(&to_free)) {
-+ mutex_unlock(&group->mark_mutex);
-+ break;
-+ }
-+ mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
-+ fsnotify_get_mark(mark);
-+ fsnotify_destroy_mark_locked(mark, group);
-+ mutex_unlock(&group->mark_mutex);
-+ fsnotify_put_mark(mark);
-+ }
- }
-
- /*
-diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
-index 1998695..fa74259 100644
---- a/fs/ocfs2/dlmglue.c
-+++ b/fs/ocfs2/dlmglue.c
-@@ -3973,9 +3973,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
- osb->dc_work_sequence = osb->dc_wake_sequence;
-
- processed = osb->blocked_lock_count;
-- while (processed) {
-- BUG_ON(list_empty(&osb->blocked_lock_list));
--
-+ /*
-+ * blocked lock processing in this loop might call iput which can
-+ * remove items off osb->blocked_lock_list. Downconvert up to
-+ * 'processed' number of locks, but stop short if we had some
-+ * removed in ocfs2_mark_lockres_freeing when downconverting.
-+ */
-+ while (processed && !list_empty(&osb->blocked_lock_list)) {
- lockres = list_entry(osb->blocked_lock_list.next,
- struct ocfs2_lock_res, l_blocked_list);
- list_del_init(&lockres->l_blocked_list);
-diff --git a/fs/signalfd.c b/fs/signalfd.c
-index 424b7b6..148f8e7 100644
---- a/fs/signalfd.c
-+++ b/fs/signalfd.c
-@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
- */
-- if (kinfo->si_code == BUS_MCEERR_AR ||
-- kinfo->si_code == BUS_MCEERR_AO)
-+ if (kinfo->si_signo == SIGBUS &&
-+ (kinfo->si_code == BUS_MCEERR_AR ||
-+ kinfo->si_code == BUS_MCEERR_AO))
- err |= __put_user((short) kinfo->si_addr_lsb,
- &uinfo->ssi_addr_lsb);
- #endif
-diff --git a/include/linux/dcache.h b/include/linux/dcache.h
-index 0f0eb1c..2a23ecb 100644
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -221,6 +221,8 @@ struct dentry_operations {
- #define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */
- #define DCACHE_FILE_TYPE 0x00400000 /* Other file type */
-
-+#define DCACHE_MAY_FREE 0x00800000
-+
- extern seqlock_t rename_lock;
-
- static inline int dname_external(const struct dentry *dentry)
-diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
-index 30db069..788c5aa 100644
---- a/include/uapi/linux/pci_regs.h
-+++ b/include/uapi/linux/pci_regs.h
-@@ -319,6 +319,7 @@
- #define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
- #define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
- #define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
-+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
- #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
-
- /* MSI-X Table entry format */
-diff --git a/ipc/mqueue.c b/ipc/mqueue.c
-index c3b3117..9699d3f 100644
---- a/ipc/mqueue.c
-+++ b/ipc/mqueue.c
-@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
- if (!leaf)
- return -ENOMEM;
- INIT_LIST_HEAD(&leaf->msg_list);
-- info->qsize += sizeof(*leaf);
- }
- leaf->priority = msg->m_type;
- rb_link_node(&leaf->rb_node, parent, p);
-@@ -188,7 +187,6 @@ try_again:
- "lazy leaf delete!\n");
- rb_erase(&leaf->rb_node, &info->msg_tree);
- if (info->node_cache) {
-- info->qsize -= sizeof(*leaf);
- kfree(leaf);
- } else {
- info->node_cache = leaf;
-@@ -201,7 +199,6 @@ try_again:
- if (list_empty(&leaf->msg_list)) {
- rb_erase(&leaf->rb_node, &info->msg_tree);
- if (info->node_cache) {
-- info->qsize -= sizeof(*leaf);
- kfree(leaf);
- } else {
- info->node_cache = leaf;
-@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
- /* Save our speculative allocation into the cache */
- INIT_LIST_HEAD(&new_leaf->msg_list);
- info->node_cache = new_leaf;
-- info->qsize += sizeof(*new_leaf);
- new_leaf = NULL;
- } else {
- kfree(new_leaf);
-@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
- /* Save our speculative allocation into the cache */
- INIT_LIST_HEAD(&new_leaf->msg_list);
- info->node_cache = new_leaf;
-- info->qsize += sizeof(*new_leaf);
- } else {
- kfree(new_leaf);
- }
-diff --git a/kernel/signal.c b/kernel/signal.c
-index 52f881d..15c22ee 100644
---- a/kernel/signal.c
-+++ b/kernel/signal.c
-@@ -2768,7 +2768,8 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
- * Other callers might not initialize the si_lsb field,
- * so check explicitly for the right codes here.
- */ -- if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) -+ if (from->si_signo == SIGBUS && -+ (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)) - err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); - #endif - break; -@@ -3035,7 +3036,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, - int, sig, - struct compat_siginfo __user *, uinfo) - { -- siginfo_t info; -+ siginfo_t info = {}; - int ret = copy_siginfo_from_user32(&info, uinfo); - if (unlikely(ret)) - return ret; -@@ -3081,7 +3082,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, - int, sig, - struct compat_siginfo __user *, uinfo) - { -- siginfo_t info; -+ siginfo_t info = {}; - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; -diff --git a/mm/vmscan.c b/mm/vmscan.c -index b850ced6..88edf53 100644 ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -871,21 +871,17 @@ static unsigned long shrink_page_list(struct list_head *page_list, - * - * 2) Global reclaim encounters a page, memcg encounters a - * page that is not marked for immediate reclaim or -- * the caller does not have __GFP_IO. In this case mark -+ * the caller does not have __GFP_FS (or __GFP_IO if it's -+ * simply going to swap, not to fs). In this case mark - * the page for immediate reclaim and continue scanning. - * -- * __GFP_IO is checked because a loop driver thread might -+ * Require may_enter_fs because we would wait on fs, which -+ * may not have submitted IO yet. And the loop driver might - * enter reclaim, and deadlock if it waits on a page for - * which it is needed to do the write (loop masks off - * __GFP_IO|__GFP_FS for this reason); but more thought - * would probably show more reasons. - * -- * Don't require __GFP_FS, since we're not going into the -- * FS, just waiting on its writeback completion. Worryingly, -- * ext4 gfs2 and xfs allocate pages with -- * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing -- * may_enter_fs here is liable to OOM on them. -- * - * 3) memcg encounters a page that is not already marked - * PageReclaim. 
memcg does not have any dirty pages - * throttling so we could easily OOM just because too many -@@ -902,7 +898,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, - - /* Case 2 above */ - } else if (global_reclaim(sc) || -- !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { -+ !PageReclaim(page) || !may_enter_fs) { - /* - * This is slightly racy - end_page_writeback() - * might have just cleared PageReclaim, then -diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c -index 085c496..9d8e420 100644 ---- a/security/integrity/ima/ima_policy.c -+++ b/security/integrity/ima/ima_policy.c -@@ -27,6 +27,8 @@ - #define IMA_UID 0x0008 - #define IMA_FOWNER 0x0010 - #define IMA_FSUUID 0x0020 -+#define IMA_INMASK 0x0040 -+#define IMA_EUID 0x0080 - - #define UNKNOWN 0 - #define MEASURE 0x0001 /* same as IMA_MEASURE */ -@@ -171,6 +173,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule, - return false; - if ((rule->flags & IMA_MASK) && rule->mask != mask) - return false; -+ if ((rule->flags & IMA_INMASK) && -+ (!(rule->mask & mask) && func != POST_SETATTR)) -+ return false; - if ((rule->flags & IMA_FSMAGIC) - && rule->fsmagic != inode->i_sb->s_magic) - return false; -@@ -179,6 +184,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule, - return false; - if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid)) - return false; -+ if (rule->flags & IMA_EUID) { -+ if (has_capability_noaudit(current, CAP_SETUID)) { -+ if (!uid_eq(rule->uid, cred->euid) -+ && !uid_eq(rule->uid, cred->suid) -+ && !uid_eq(rule->uid, cred->uid)) -+ return false; -+ } else if (!uid_eq(rule->uid, cred->euid)) -+ return false; -+ } -+ - if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid)) - return false; - for (i = 0; i < MAX_LSM_RULES; i++) { -@@ -350,7 +365,8 @@ enum { - Opt_audit, - Opt_obj_user, Opt_obj_role, Opt_obj_type, - Opt_subj_user, Opt_subj_role, Opt_subj_type, -- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner, -+ Opt_func, Opt_mask, Opt_fsmagic, -+ Opt_uid, Opt_euid, Opt_fowner, - Opt_appraise_type, Opt_fsuuid, Opt_permit_directio - }; - -@@ -371,6 +387,7 @@ static match_table_t policy_tokens = { - {Opt_fsmagic, "fsmagic=%s"}, - {Opt_fsuuid, "fsuuid=%s"}, - {Opt_uid, "uid=%s"}, -+ {Opt_euid, "euid=%s"}, - {Opt_fowner, "fowner=%s"}, - {Opt_appraise_type, "appraise_type=%s"}, - {Opt_permit_directio, "permit_directio"}, -@@ -412,6 +429,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value) - static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) - { - struct audit_buffer *ab; -+ char *from; - char *p; - int result = 0; - -@@ -500,18 +518,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) - if (entry->mask) - result = -EINVAL; - -- if ((strcmp(args[0].from, "MAY_EXEC")) == 0) -+ from = args[0].from; -+ if (*from == '^') -+ from++; -+ -+ if ((strcmp(from, "MAY_EXEC")) == 0) - entry->mask = MAY_EXEC; -- else if (strcmp(args[0].from, "MAY_WRITE") == 0) -+ else if (strcmp(from, "MAY_WRITE") == 0) - entry->mask = MAY_WRITE; -- else if (strcmp(args[0].from, "MAY_READ") == 0) -+ else if (strcmp(from, "MAY_READ") == 0) - entry->mask = MAY_READ; -- else if (strcmp(args[0].from, "MAY_APPEND") == 0) -+ else if (strcmp(from, "MAY_APPEND") == 0) - entry->mask = MAY_APPEND; - else - result = -EINVAL; - if (!result) -- entry->flags |= IMA_MASK; -+ entry->flags |= (*args[0].from == '^') -+ ? 
IMA_INMASK : IMA_MASK; - break; - case Opt_fsmagic: - ima_log_string(ab, "fsmagic", args[0].from); -@@ -542,6 +565,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) - break; - case Opt_uid: - ima_log_string(ab, "uid", args[0].from); -+ case Opt_euid: -+ if (token == Opt_euid) -+ ima_log_string(ab, "euid", args[0].from); - - if (uid_valid(entry->uid)) { - result = -EINVAL; -@@ -550,11 +576,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) - - result = strict_strtoul(args[0].from, 10, &lnum); - if (!result) { -- entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); -- if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) -+ entry->uid = make_kuid(current_user_ns(), -+ (uid_t) lnum); -+ if (!uid_valid(entry->uid) || -+ (uid_t)lnum != lnum) - result = -EINVAL; - else -- entry->flags |= IMA_UID; -+ entry->flags |= (token == Opt_uid) -+ ? IMA_UID : IMA_EUID; - } - break; - case Opt_fowner: -diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c -index 51e2080..7b0aac9 100644 ---- a/sound/pci/hda/patch_cirrus.c -+++ b/sound/pci/hda/patch_cirrus.c -@@ -1002,9 +1002,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec, - - spec->spdif_present = spdif_present; - /* SPDIF TX on/off */ -- if (spdif_present) -- snd_hda_set_pin_ctl(codec, spdif_pin, -- spdif_present ? PIN_OUT : 0); -+ snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0); - - cs_automute(codec); - } -diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c -index 651e2fe..dfa9755 100644 ---- a/sound/soc/codecs/pcm1681.c -+++ b/sound/soc/codecs/pcm1681.c -@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec) - - if (val != -1) { - regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, -- PCM1681_DEEMPH_RATE_MASK, val); -+ PCM1681_DEEMPH_RATE_MASK, val << 3); - enable = 1; - } else - enable = 0; diff --git a/3.14.51/4420_grsecurity-3.1-3.14.51-201508181951.patch b/3.14.51/4420_grsecurity-3.1-3.14.51-201508181951.patch deleted file mode 100644 index 80024c4..0000000 --- a/3.14.51/4420_grsecurity-3.1-3.14.51-201508181951.patch +++ /dev/null @@ -1,142863 +0,0 @@ -diff --git a/Documentation/dontdiff b/Documentation/dontdiff -index b89a739..e289b9b 100644 ---- a/Documentation/dontdiff -+++ b/Documentation/dontdiff -@@ -2,9 +2,11 @@ - *.aux - *.bin - *.bz2 -+*.c.[012]*.* - *.cis - *.cpio - *.csp -+*.dbg - *.dsp - *.dvi - *.elf -@@ -14,6 +16,7 @@ - *.gcov - *.gen.S - *.gif -+*.gmo - *.grep - *.grp - *.gz -@@ -48,14 +51,17 @@ - *.tab.h - *.tex - *.ver -+*.vim - *.xml - *.xz - *_MODULES -+*_reg_safe.h - *_vga16.c - *~ - \#*# - *.9 --.* -+.[^g]* -+.gen* - .*.d - .mm - 53c700_d.h -@@ -69,9 +75,11 @@ Image - Module.markers - Module.symvers - PENDING -+PERF* - SCCS - System.map* - TAGS -+TRACEEVENT-CFLAGS - aconf - af_names.h - aic7*reg.h* -@@ -80,6 +88,7 @@ aic7*seq.h* - aicasm - aicdb.h* - altivec*.c -+ashldi3.S - asm-offsets.h - asm_offsets.h - autoconf.h* -@@ -92,32 +101,40 @@ bounds.h - bsetup - btfixupprep - build -+builtin-policy.h - bvmlinux - bzImage* - capability_names.h - capflags.c - classlist.h* -+clut_vga16.c -+common-cmds.h - comp*.log - compile.h* - conf - config - config-* - config_data.h* -+config.c - config.mak - config.mak.autogen -+config.tmp - conmakehash - consolemap_deftbl.c* - cpustr.h - crc32table.h* - cscope.* - defkeymap.c -+devicetable-offsets.h - devlist.h* - dnotify_test - docproc - dslm -+dtc-lexer.lex.c - elf2ecoff - elfconfig.h* - evergreen_reg_safe.h -+exception_policy.conf - fixdep - 
flask.h - fore200e_mkfirm -@@ -125,12 +142,15 @@ fore200e_pca_fw.c* - gconf - gconf.glade.h - gen-devlist -+gen-kdb_cmds.c - gen_crc32table - gen_init_cpio - generated - genheaders - genksyms - *_gray256.c -+hash -+hid-example - hpet_example - hugepage-mmap - hugepage-shm -@@ -145,14 +165,14 @@ int32.c - int4.c - int8.c - kallsyms --kconfig -+kern_constants.h - keywords.c - ksym.c* - ksym.h* - kxgettext - lex.c - lex.*.c --linux -+lib1funcs.S - logo_*.c - logo_*_clut224.c - logo_*_mono.c -@@ -162,14 +182,15 @@ mach-types.h - machtypes.h - map - map_hugetlb --media - mconf -+mdp - miboot* - mk_elfconfig - mkboot - mkbugboot - mkcpustr - mkdep -+mkpiggy - mkprep - mkregtable - mktables -@@ -185,6 +206,8 @@ oui.c* - page-types - parse.c - parse.h -+parse-events* -+pasyms.h - patches* - pca200e.bin - pca200e_ecd.bin2 -@@ -194,6 +217,7 @@ perf-archive - piggyback - piggy.gzip - piggy.S -+pmu-* - pnmtologo - ppc_defs.h* - pss_boot.h -@@ -203,7 +227,12 @@ r200_reg_safe.h - r300_reg_safe.h - r420_reg_safe.h - r600_reg_safe.h -+randomize_layout_hash.h -+randomize_layout_seed.h -+realmode.lds -+realmode.relocs - recordmcount -+regdb.c - relocs - rlim_names.h - rn50_reg_safe.h -@@ -213,8 +242,12 @@ series - setup - setup.bin - setup.elf -+signing_key* -+size_overflow_hash.h - sImage -+slabinfo - sm_tbl* -+sortextable - split-include - syscalltab.h - tables.c -@@ -224,6 +257,7 @@ tftpboot.img - timeconst.h - times.h* - trix_boot.h -+user_constants.h - utsrelease.h* - vdso-syms.lds - vdso.lds -@@ -235,13 +269,17 @@ vdso32.lds - vdso32.so.dbg - vdso64.lds - vdso64.so.dbg -+vdsox32.lds -+vdsox32-syms.lds - version.h* - vmImage - vmlinux - vmlinux-* - vmlinux.aout - vmlinux.bin.all -+vmlinux.bin.bz2 - vmlinux.lds -+vmlinux.relocs - vmlinuz - voffset.h - vsyscall.lds -@@ -249,9 +287,12 @@ vsyscall_32.lds - wanxlfw.inc - uImage - unifdef -+utsrelease.h - wakeup.bin - wakeup.elf - wakeup.lds -+x509* - zImage* - zconf.hash.c -+zconf.lex.c - zoffset.h -diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt -index 5d91ba1..ef1d374 100644 ---- a/Documentation/kernel-parameters.txt -+++ b/Documentation/kernel-parameters.txt -@@ -1084,6 +1084,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. - Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0. - Default: 1024 - -+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to -+ ignore grsecurity's /proc restrictions -+ -+ grsec_sysfs_restrict= Format: 0 | 1 -+ Default: 1 -+ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config -+ - hashdist= [KNL,NUMA] Large hashes allocated during boot - are distributed across NUMA nodes. Defaults on - for 64-bit NUMA, off otherwise. -@@ -2081,6 +2088,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted. - noexec=on: enable non-executable mappings (default) - noexec=off: disable non-executable mappings - -+ nopcid [X86-64] -+ Disable PCID (Process-Context IDentifier) even if it -+ is supported by the processor. -+ - nosmap [X86] - Disable SMAP (Supervisor Mode Access Prevention) - even if it is supported by processor. -@@ -2348,6 +2359,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted. - the specified number of seconds. This is to be used if - your oopses keep scrolling off the screen. - -+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain -+ virtualization environments that don't cope well with the -+ expand down segment used by UDEREF on X86-32 or the frequent -+ page table updates on X86-64. 
-+ -+ pax_sanitize_slab= -+ Format: { 0 | 1 | off | fast | full } -+ Options '0' and '1' are only provided for backward -+ compatibility, 'off' or 'fast' should be used instead. -+ 0|off : disable slab object sanitization -+ 1|fast: enable slab object sanitization excluding -+ whitelisted slabs (default) -+ full : sanitize all slabs, even the whitelisted ones -+ -+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already. -+ -+ pax_extra_latent_entropy -+ Enable a very simple form of latent entropy extraction -+ from the first 4GB of memory as the bootmem allocator -+ passes the memory pages to the buddy allocator. -+ -+ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF -+ when the processor supports PCID. -+ - pcbit= [HW,ISDN] - - pcd. [PARIDE] -diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt -index 855d9b3..154c500 100644 ---- a/Documentation/sysctl/kernel.txt -+++ b/Documentation/sysctl/kernel.txt -@@ -41,6 +41,7 @@ show up in /proc/sys/kernel: - - kptr_restrict - - kstack_depth_to_print [ X86 only ] - - l2cr [ PPC only ] -+- modify_ldt [ X86 only ] - - modprobe ==> Documentation/debugging-modules.txt - - modules_disabled - - msg_next_id [ sysv ipc ] -@@ -381,6 +382,20 @@ This flag controls the L2 cache of G3 processor boards. If - - ============================================================== - -+modify_ldt: (X86 only) -+ -+Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT -+(Local Descriptor Table) may be needed to run a 16-bit or segmented code -+such as Dosemu or Wine. This is done via a system call which is not needed -+to run portable applications, and which can sometimes be abused to exploit -+some weaknesses of the architecture, opening new vulnerabilities. -+ -+This sysctl allows one to increase the system's security by disabling the -+system call, or to restore compatibility with specific applications when it -+was already disabled. -+ -+============================================================== -+ - modules_disabled: - - A toggle value indicating if modules are allowed to be loaded -diff --git a/Makefile b/Makefile -index 83275d8e..235ffae 100644 ---- a/Makefile -+++ b/Makefile -@@ -244,8 +244,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ - - HOSTCC = gcc - HOSTCXX = g++ --HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89 --HOSTCXXFLAGS = -O2 -+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -std=gnu89 -fno-delete-null-pointer-checks -+HOSTCFLAGS += $(call cc-option, -Wno-empty-body) -+HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds - - # Decide whether to build built-in, modular, or both. - # Normally, just do built-in. 
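
The modify_ldt sysctl described in the Documentation/sysctl/kernel.txt hunk above is reachable at runtime as /proc/sys/kernel/modify_ldt, following the standard kernel.* -> /proc/sys/kernel/* procfs mapping. A minimal user-space sketch of flipping the knob, assuming root privilege and a kernel carrying this series (illustration only, not part of the patch):

    /* Hypothetical helper, not from the patch: disable the modify_ldt
     * syscall via the sysctl added by this grsecurity series. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/modify_ldt", "w");

        if (!f) {
            perror("fopen");    /* knob absent or insufficient privilege */
            return 1;
        }
        fputs("0\n", f);        /* 0 = disable, 1 = re-enable */
        fclose(f);
        return 0;
    }
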
-@@ -425,8 +426,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \ - # Rules shared between *config targets and build targets - - # Basic helpers built in scripts/ --PHONY += scripts_basic --scripts_basic: -+PHONY += scripts_basic gcc-plugins -+scripts_basic: gcc-plugins - $(Q)$(MAKE) $(build)=scripts/basic - $(Q)rm -f .tmp_quiet_recordmcount - -@@ -587,6 +588,75 @@ else - KBUILD_CFLAGS += -O2 - endif - -+# Tell gcc to never replace conditional load with a non-conditional one -+KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) -+ -+ifndef DISABLE_PAX_PLUGINS -+ifeq ($(call cc-ifversion, -ge, 0408, y), y) -+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)") -+else -+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)") -+endif -+ifneq ($(PLUGINCC),) -+ifdef CONFIG_PAX_CONSTIFY_PLUGIN -+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN -+endif -+ifdef CONFIG_PAX_MEMORY_STACKLEAK -+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN -+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100 -+endif -+ifdef CONFIG_KALLOCSTAT_PLUGIN -+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so -+endif -+ifdef CONFIG_PAX_KERNEXEC_PLUGIN -+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so -+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN -+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN -+endif -+ifdef CONFIG_GRKERNSEC_RANDSTRUCT -+RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN -+ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE -+RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode -+endif -+endif -+ifdef CONFIG_CHECKER_PLUGIN -+ifeq ($(call cc-ifversion, -ge, 0406, y), y) -+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN -+endif -+endif -+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so -+ifdef CONFIG_PAX_SIZE_OVERFLOW -+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN -+endif -+ifdef CONFIG_PAX_LATENT_ENTROPY -+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN -+endif -+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK -+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN -+endif -+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS) -+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) -+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS) -+GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS) -+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS) -+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS -+ifeq ($(KBUILD_EXTMOD),) -+gcc-plugins: -+ $(Q)$(MAKE) $(build)=tools/gcc -+else -+gcc-plugins: ; -+endif -+else -+gcc-plugins: -+ifeq ($(call cc-ifversion, -ge, 0405, y), y) -+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. 
On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)) -+else -+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least" -+endif -+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active." -+endif -+endif -+ - include $(srctree)/arch/$(SRCARCH)/Makefile - - ifdef CONFIG_READABLE_ASM -@@ -783,7 +853,7 @@ export mod_sign_cmd - - - ifeq ($(KBUILD_EXTMOD),) --core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ -+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ - - vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ - $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ -@@ -832,6 +902,8 @@ endif - - # The actual objects are generated when descending, - # make sure no implicit rule kicks in -+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) - $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; - - # Handle descending into subdirectories listed in $(vmlinux-dirs) -@@ -841,7 +913,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ; - # Error messages still appears in the original language - - PHONY += $(vmlinux-dirs) --$(vmlinux-dirs): prepare scripts -+$(vmlinux-dirs): gcc-plugins prepare scripts - $(Q)$(MAKE) $(build)=$@ - - define filechk_kernel.release -@@ -884,10 +956,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ - - archprepare: archheaders archscripts prepare1 scripts_basic - -+prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) - prepare0: archprepare FORCE - $(Q)$(MAKE) $(build)=. - - # All the preparing.. -+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) - prepare: prepare0 - - # Generate some files -@@ -995,6 +1070,8 @@ all: modules - # using awk while concatenating to the final file. 
- - PHONY += modules -+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) - modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin - $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order - @$(kecho) ' Building modules, stage 2.'; -@@ -1010,7 +1087,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin) - - # Target to prepare building external modules - PHONY += modules_prepare --modules_prepare: prepare scripts -+modules_prepare: gcc-plugins prepare scripts - - # Target to install modules - PHONY += modules_install -@@ -1076,7 +1153,10 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \ - Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ - signing_key.priv signing_key.x509 x509.genkey \ - extra_certificates signing_key.x509.keyid \ -- signing_key.x509.signer -+ signing_key.x509.signer \ -+ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \ -+ tools/gcc/size_overflow_plugin/size_overflow_hash.h \ -+ tools/gcc/randomize_layout_seed.h - - # clean - Delete most, but leave enough to build external modules - # -@@ -1115,7 +1195,7 @@ distclean: mrproper - @find $(srctree) $(RCS_FIND_IGNORE) \ - \( -name '*.orig' -o -name '*.rej' -o -name '*~' \ - -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \ -- -o -name '.*.rej' \ -+ -o -name '.*.rej' -o -name '*.so' \ - -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \ - -type f -print | xargs rm -f - -@@ -1277,6 +1357,8 @@ PHONY += $(module-dirs) modules - $(module-dirs): crmodverdir $(objtree)/Module.symvers - $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@) - -+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) - modules: $(module-dirs) - @$(kecho) ' Building modules, stage 2.'; - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost -@@ -1416,17 +1498,21 @@ else - target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@)) - endif - --%.s: %.c prepare scripts FORCE -+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) -+%.s: %.c gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.i: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.o: %.c prepare scripts FORCE -+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) -+%.o: %.c gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.lst: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.s: %.S prepare scripts FORCE -+%.s: %.S gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) --%.o: %.S prepare scripts FORCE -+%.o: %.S gcc-plugins prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) - %.symtypes: %.c prepare scripts FORCE - $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@) -@@ -1436,11 +1522,15 @@ endif - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) --%/: prepare scripts FORCE -+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) -+%/: gcc-plugins prepare scripts FORCE - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) --%.ko: prepare scripts FORCE -+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS) -+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS) 
-+%.ko: gcc-plugins prepare scripts FORCE - $(cmd_crmodverdir) - $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \ - $(build)=$(build-dir) $(@:.ko=.o) -diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h -index 78b03ef..da28a51 100644 ---- a/arch/alpha/include/asm/atomic.h -+++ b/arch/alpha/include/asm/atomic.h -@@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) - #define atomic_dec(v) atomic_sub(1,(v)) - #define atomic64_dec(v) atomic64_sub(1,(v)) - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - #define smp_mb__before_atomic_dec() smp_mb() - #define smp_mb__after_atomic_dec() smp_mb() - #define smp_mb__before_atomic_inc() smp_mb() -diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h -index ad368a9..fbe0f25 100644 ---- a/arch/alpha/include/asm/cache.h -+++ b/arch/alpha/include/asm/cache.h -@@ -4,19 +4,19 @@ - #ifndef __ARCH_ALPHA_CACHE_H - #define __ARCH_ALPHA_CACHE_H - -+#include <linux/const.h> - - /* Bytes per L1 (data) cache line. */ - #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6) --# define L1_CACHE_BYTES 64 - # define L1_CACHE_SHIFT 6 - #else - /* Both EV4 and EV5 are write-through, read-allocate, - direct-mapped, physical. - */ --# define L1_CACHE_BYTES 32 - # define L1_CACHE_SHIFT 5 - #endif - -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - #define SMP_CACHE_BYTES L1_CACHE_BYTES - - #endif -diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h -index 968d999..d36b2df 100644 ---- a/arch/alpha/include/asm/elf.h -+++ b/arch/alpha/include/asm/elf.h -@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL) -+ -+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28) -+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19) -+#endif -+ - /* $0 is set by ld.so to a pointer to a function which might be - registered using atexit. 
This provides a mean for the dynamic - linker to call DT_FINI functions for shared libraries that have -diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h -index aab14a0..b4fa3e7 100644 ---- a/arch/alpha/include/asm/pgalloc.h -+++ b/arch/alpha/include/asm/pgalloc.h -@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) - pgd_set(pgd, pmd); - } - -+static inline void -+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) -+{ -+ pgd_populate(mm, pgd, pmd); -+} -+ - extern pgd_t *pgd_alloc(struct mm_struct *mm); - - static inline void -diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h -index d8f9b7e..f6222fa 100644 ---- a/arch/alpha/include/asm/pgtable.h -+++ b/arch/alpha/include/asm/pgtable.h -@@ -102,6 +102,17 @@ struct vm_area_struct; - #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) - #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) - #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE) -+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) -+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_COPY_NOEXEC PAGE_COPY -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+#endif -+ - #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) - - #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) -diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c -index 2fd00b7..cfd5069 100644 ---- a/arch/alpha/kernel/module.c -+++ b/arch/alpha/kernel/module.c -@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, - - /* The small sections were sorted to the end of the segment. - The following should definitely cover them. */ -- gp = (u64)me->module_core + me->core_size - 0x8000; -+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000; - got = sechdrs[me->arch.gotsecindex].sh_addr; - - for (i = 0; i < n; i++) { -diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c -index 1402fcc..0b1abd2 100644 ---- a/arch/alpha/kernel/osf_sys.c -+++ b/arch/alpha/kernel/osf_sys.c -@@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) - generic version except that we know how to honor ADDR_LIMIT_32BIT. */ - - static unsigned long --arch_get_unmapped_area_1(unsigned long addr, unsigned long len, -- unsigned long limit) -+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len, -+ unsigned long limit, unsigned long flags) - { - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); - - info.flags = 0; - info.length = len; -@@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len, - info.high_limit = limit; - info.align_mask = 0; - info.align_offset = 0; -+ info.threadstack_offset = offset; - return vm_unmapped_area(&info); - } - -@@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - merely specific addresses, but regions of memory -- perhaps - this feature should be incorporated into all ports? 
*/ - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { -- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit); -+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags); - if (addr != (unsigned long) -ENOMEM) - return addr; - } - - /* Next, try allocating at TASK_UNMAPPED_BASE. */ -- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE), -- len, limit); -+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags); -+ - if (addr != (unsigned long) -ENOMEM) - return addr; - - /* Finally, try allocating in low memory. */ -- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit); -+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags); - - return addr; - } -diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c -index 9d0ac09..479a962 100644 ---- a/arch/alpha/mm/fault.c -+++ b/arch/alpha/mm/fault.c -@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm) - __reload_thread(pcb); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+/* -+ * PaX: decide what to do with offenders (regs->pc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int ldah, ldq, jmp; -+ -+ err = get_user(ldah, (unsigned int *)regs->pc); -+ err |= get_user(ldq, (unsigned int *)(regs->pc+4)); -+ err |= get_user(jmp, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((ldah & 0xFFFF0000U) == 0x277B0000U && -+ (ldq & 0xFFFF0000U) == 0xA77B0000U && -+ jmp == 0x6BFB0000U) -+ { -+ unsigned long r27, addr; -+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; -+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL; -+ -+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); -+ err = get_user(r27, (unsigned long *)addr); -+ if (err) -+ break; -+ -+ regs->r27 = r27; -+ regs->pc = r27; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #2 */ -+ unsigned int ldah, lda, br; -+ -+ err = get_user(ldah, (unsigned int *)regs->pc); -+ err |= get_user(lda, (unsigned int *)(regs->pc+4)); -+ err |= get_user(br, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((ldah & 0xFFFF0000U) == 0x277B0000U && -+ (lda & 0xFFFF0000U) == 0xA77B0000U && -+ (br & 0xFFE00000U) == 0xC3E00000U) -+ { -+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL; -+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16; -+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL; -+ -+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL); -+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation */ -+ unsigned int br; -+ -+ err = get_user(br, (unsigned int *)regs->pc); -+ -+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) { -+ unsigned int br2, ldq, nop, jmp; -+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver; -+ -+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2); -+ err = get_user(br2, (unsigned int *)addr); -+ err |= get_user(ldq, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ err |= get_user(jmp, (unsigned int *)(addr+12)); -+ err |= get_user(resolver, (unsigned long 
*)(addr+16)); -+ -+ if (err) -+ break; -+ -+ if (br2 == 0xC3600000U && -+ ldq == 0xA77B000CU && -+ nop == 0x47FF041FU && -+ jmp == 0x6B7B0000U) -+ { -+ regs->r28 = regs->pc+4; -+ regs->r27 = addr+16; -+ regs->pc = resolver; -+ return 3; -+ } -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif - - /* - * This routine handles page faults. It determines the address, -@@ -133,8 +251,29 @@ retry: - good_area: - si_code = SEGV_ACCERR; - if (cause < 0) { -- if (!(vma->vm_flags & VM_EXEC)) -+ if (!(vma->vm_flags & VM_EXEC)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc) -+ goto bad_area; -+ -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp()); -+ do_group_exit(SIGKILL); -+#else - goto bad_area; -+#endif -+ -+ } - } else if (!cause) { - /* Allow reads even for write-only mappings */ - if (!(vma->vm_flags & (VM_READ | VM_WRITE))) -diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c -index a2ff5c5..ecf6a78 100644 ---- a/arch/arc/kernel/kgdb.c -+++ b/arch/arc/kernel/kgdb.c -@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, - return -1; - } - --unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) --{ -- return instruction_pointer(regs); --} -- - int kgdb_arch_init(void) - { - single_step_data.armed = 0; -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 4733d32..b142a40 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -1863,7 +1863,7 @@ config ALIGNMENT_TRAP - - config UACCESS_WITH_MEMCPY - bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()" -- depends on MMU -+ depends on MMU && !PAX_MEMORY_UDEREF - default y if CPU_FEROCEON - help - Implement faster copy_to_user and clear_user methods for CPU -@@ -2126,6 +2126,7 @@ config XIP_PHYS_ADDR - config KEXEC - bool "Kexec system call (EXPERIMENTAL)" - depends on (!SMP || PM_SLEEP_SMP) -+ depends on !GRKERNSEC_KMEM - help - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot -diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h -index 62d2cb5..26a6f3c 100644 ---- a/arch/arm/include/asm/atomic.h -+++ b/arch/arm/include/asm/atomic.h -@@ -18,17 +18,41 @@ - #include <asm/barrier.h> - #include <asm/cmpxchg.h> - -+#ifdef CONFIG_GENERIC_ATOMIC64 -+#include <asm-generic/atomic64.h> -+#endif -+ - #define ATOMIC_INIT(i) { (i) } - - #ifdef __KERNEL__ - -+#ifdef CONFIG_THUMB2_KERNEL -+#define REFCOUNT_TRAP_INSN "bkpt 0xf1" -+#else -+#define REFCOUNT_TRAP_INSN "bkpt 0xf103" -+#endif -+ -+#define _ASM_EXTABLE(from, to) \ -+" .pushsection __ex_table,\"a\"\n"\ -+" .align 3\n" \ -+" .long " #from ", " #to"\n" \ -+" .popsection" -+ - /* - * On ARM, ordinary assignment (str instruction) doesn't clear the local - * strex/ldrex monitor on some implementations. The reason we can use it for - * atomic_set() is the clrex or dummy strex done on every exception return. 
- */ - #define atomic_read(v) (*(volatile int *)&(v)->counter) -+static inline int atomic_read_unchecked(const atomic_unchecked_t *v) -+{ -+ return *(const volatile int *)&v->counter; -+} - #define atomic_set(v,i) (((v)->counter) = (i)) -+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) -+{ -+ v->counter = i; -+} - - #if __LINUX_ARM_ARCH__ >= 6 - -@@ -44,6 +68,36 @@ static inline void atomic_add(int i, atomic_t *v) - - prefetchw(&v->counter); - __asm__ __volatile__("@ atomic_add\n" -+"1: ldrex %1, [%3]\n" -+" adds %0, %1, %4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strex %1, %0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "Ir" (i) -+ : "cc"); -+} -+ -+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ prefetchw(&v->counter); -+ __asm__ __volatile__("@ atomic_add_unchecked\n" - "1: ldrex %0, [%3]\n" - " add %0, %0, %4\n" - " strex %1, %0, [%3]\n" -@@ -62,6 +116,42 @@ static inline int atomic_add_return(int i, atomic_t *v) - smp_mb(); - - __asm__ __volatile__("@ atomic_add_return\n" -+"1: ldrex %1, [%3]\n" -+" adds %0, %1, %4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+" mov %0, %1\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strex %1, %0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "Ir" (i) -+ : "cc"); -+ -+ smp_mb(); -+ -+ return result; -+} -+ -+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ smp_mb(); -+ -+ __asm__ __volatile__("@ atomic_add_return_unchecked\n" - "1: ldrex %0, [%3]\n" - " add %0, %0, %4\n" - " strex %1, %0, [%3]\n" -@@ -83,6 +173,36 @@ static inline void atomic_sub(int i, atomic_t *v) - - prefetchw(&v->counter); - __asm__ __volatile__("@ atomic_sub\n" -+"1: ldrex %1, [%3]\n" -+" subs %0, %1, %4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strex %1, %0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "Ir" (i) -+ : "cc"); -+} -+ -+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) -+{ -+ unsigned long tmp; -+ int result; -+ -+ prefetchw(&v->counter); -+ __asm__ __volatile__("@ atomic_sub_unchecked\n" - "1: ldrex %0, [%3]\n" - " sub %0, %0, %4\n" - " strex %1, %0, [%3]\n" -@@ -101,11 +221,25 @@ static inline int atomic_sub_return(int i, atomic_t *v) - smp_mb(); - - __asm__ __volatile__("@ atomic_sub_return\n" --"1: ldrex %0, [%3]\n" --" sub %0, %0, %4\n" -+"1: ldrex %1, [%3]\n" -+" subs %0, %1, %4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+" mov %0, %1\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ - " strex %1, %0, [%3]\n" - " teq %1, #0\n" - " bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "Ir" (i) - : "cc"); -@@ -138,6 +272,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) - return oldval; - } - -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t 
*ptr, int old, int new) -+{ -+ unsigned long oldval, res; -+ -+ smp_mb(); -+ -+ do { -+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n" -+ "ldrex %1, [%3]\n" -+ "mov %0, #0\n" -+ "teq %1, %4\n" -+ "strexeq %0, %5, [%3]\n" -+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) -+ : "r" (&ptr->counter), "Ir" (old), "r" (new) -+ : "cc"); -+ } while (res); -+ -+ smp_mb(); -+ -+ return oldval; -+} -+ - #else /* ARM_ARCH_6 */ - - #ifdef CONFIG_SMP -@@ -156,7 +312,17 @@ static inline int atomic_add_return(int i, atomic_t *v) - - return val; - } -+ -+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) -+{ -+ return atomic_add_return(i, v); -+} -+ - #define atomic_add(i, v) (void) atomic_add_return(i, v) -+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) -+{ -+ (void) atomic_add_return(i, v); -+} - - static inline int atomic_sub_return(int i, atomic_t *v) - { -@@ -171,6 +337,10 @@ static inline int atomic_sub_return(int i, atomic_t *v) - return val; - } - #define atomic_sub(i, v) (void) atomic_sub_return(i, v) -+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) -+{ -+ (void) atomic_sub_return(i, v); -+} - - static inline int atomic_cmpxchg(atomic_t *v, int old, int new) - { -@@ -186,9 +356,18 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new) - return ret; - } - -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) -+{ -+ return atomic_cmpxchg(v, old, new); -+} -+ - #endif /* __LINUX_ARM_ARCH__ */ - - #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) -+{ -+ return xchg(&v->counter, new); -+} - - static inline int __atomic_add_unless(atomic_t *v, int a, int u) - { -@@ -201,11 +380,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) - } - - #define atomic_inc(v) atomic_add(1, v) -+static inline void atomic_inc_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_add_unchecked(1, v); -+} - #define atomic_dec(v) atomic_sub(1, v) -+static inline void atomic_dec_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_sub_unchecked(1, v); -+} - - #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v) == 0; -+} - #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) - #define atomic_inc_return(v) (atomic_add_return(1, v)) -+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v); -+} - #define atomic_dec_return(v) (atomic_sub_return(1, v)) - #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) - -@@ -221,6 +416,14 @@ typedef struct { - long long counter; - } atomic64_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ long long counter; -+} atomic64_unchecked_t; -+#else -+typedef atomic64_t atomic64_unchecked_t; -+#endif -+ - #define ATOMIC64_INIT(i) { (i) } - - #ifdef CONFIG_ARM_LPAE -@@ -237,6 +440,19 @@ static inline long long atomic64_read(const atomic64_t *v) - return result; - } - -+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ long long result; -+ -+ __asm__ __volatile__("@ atomic64_read_unchecked\n" -+" ldrd %0, %H0, [%1]" -+ : "=&r" (result) -+ : "r" (&v->counter), "Qo" (v->counter) -+ ); -+ -+ return result; -+} -+ - static inline void atomic64_set(atomic64_t *v, long long i) - { - __asm__ __volatile__("@ atomic64_set\n" -@@ -245,6 +461,15 @@ static 
inline void atomic64_set(atomic64_t *v, long long i) - : "r" (&v->counter), "r" (i) - ); - } -+ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) -+{ -+ __asm__ __volatile__("@ atomic64_set_unchecked\n" -+" strd %2, %H2, [%1]" -+ : "=Qo" (v->counter) -+ : "r" (&v->counter), "r" (i) -+ ); -+} - #else - static inline long long atomic64_read(const atomic64_t *v) - { -@@ -259,6 +484,19 @@ static inline long long atomic64_read(const atomic64_t *v) - return result; - } - -+static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ long long result; -+ -+ __asm__ __volatile__("@ atomic64_read_unchecked\n" -+" ldrexd %0, %H0, [%1]" -+ : "=&r" (result) -+ : "r" (&v->counter), "Qo" (v->counter) -+ ); -+ -+ return result; -+} -+ - static inline void atomic64_set(atomic64_t *v, long long i) - { - long long tmp; -@@ -273,6 +511,21 @@ static inline void atomic64_set(atomic64_t *v, long long i) - : "r" (&v->counter), "r" (i) - : "cc"); - } -+ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) -+{ -+ long long tmp; -+ -+ prefetchw(&v->counter); -+ __asm__ __volatile__("@ atomic64_set_unchecked\n" -+"1: ldrexd %0, %H0, [%2]\n" -+" strexd %0, %3, %H3, [%2]\n" -+" teq %0, #0\n" -+" bne 1b" -+ : "=&r" (tmp), "=Qo" (v->counter) -+ : "r" (&v->counter), "r" (i) -+ : "cc"); -+} - #endif - - static inline void atomic64_add(long long i, atomic64_t *v) -@@ -284,6 +537,37 @@ static inline void atomic64_add(long long i, atomic64_t *v) - __asm__ __volatile__("@ atomic64_add\n" - "1: ldrexd %0, %H0, [%3]\n" - " adds %Q0, %Q0, %Q4\n" -+" adcs %R0, %R0, %R4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strexd %1, %0, %H0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "r" (i) -+ : "cc"); -+} -+ -+static inline void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ long long result; -+ unsigned long tmp; -+ -+ prefetchw(&v->counter); -+ __asm__ __volatile__("@ atomic64_add_unchecked\n" -+"1: ldrexd %0, %H0, [%3]\n" -+" adds %Q0, %Q0, %Q4\n" - " adc %R0, %R0, %R4\n" - " strexd %1, %0, %H0, [%3]\n" - " teq %1, #0\n" -@@ -303,6 +587,44 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) - __asm__ __volatile__("@ atomic64_add_return\n" - "1: ldrexd %0, %H0, [%3]\n" - " adds %Q0, %Q0, %Q4\n" -+" adcs %R0, %R0, %R4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+" mov %0, %1\n" -+" mov %H0, %H1\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strexd %1, %0, %H0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "r" (i) -+ : "cc"); -+ -+ smp_mb(); -+ -+ return result; -+} -+ -+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ long long result; -+ unsigned long tmp; -+ -+ smp_mb(); -+ -+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n" -+"1: ldrexd %0, %H0, [%3]\n" -+" adds %Q0, %Q0, %Q4\n" - " adc %R0, %R0, %R4\n" - " strexd %1, %0, %H0, [%3]\n" - " teq %1, #0\n" -@@ -325,6 +647,37 @@ static inline void atomic64_sub(long long i, atomic64_t *v) - __asm__ __volatile__("@ atomic64_sub\n" - "1: ldrexd %0, %H0, [%3]\n" - " subs %Q0, %Q0, %Q4\n" -+" sbcs %R0, %R0, %R4\n" -+ -+#ifdef 
CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ -+" strexd %1, %0, %H0, [%3]\n" -+" teq %1, #0\n" -+" bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ -+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) -+ : "r" (&v->counter), "r" (i) -+ : "cc"); -+} -+ -+static inline void atomic64_sub_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ long long result; -+ unsigned long tmp; -+ -+ prefetchw(&v->counter); -+ __asm__ __volatile__("@ atomic64_sub_unchecked\n" -+"1: ldrexd %0, %H0, [%3]\n" -+" subs %Q0, %Q0, %Q4\n" - " sbc %R0, %R0, %R4\n" - " strexd %1, %0, %H0, [%3]\n" - " teq %1, #0\n" -@@ -344,10 +697,25 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) - __asm__ __volatile__("@ atomic64_sub_return\n" - "1: ldrexd %0, %H0, [%3]\n" - " subs %Q0, %Q0, %Q4\n" --" sbc %R0, %R0, %R4\n" -+" sbcs %R0, %R0, %R4\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+" mov %0, %1\n" -+" mov %H0, %H1\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ - " strexd %1, %0, %H0, [%3]\n" - " teq %1, #0\n" - " bne 1b" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+"\n4:\n" -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "r" (i) - : "cc"); -@@ -382,6 +750,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old, - return oldval; - } - -+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old, -+ long long new) -+{ -+ long long oldval; -+ unsigned long res; -+ -+ smp_mb(); -+ -+ do { -+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n" -+ "ldrexd %1, %H1, [%3]\n" -+ "mov %0, #0\n" -+ "teq %1, %4\n" -+ "teqeq %H1, %H4\n" -+ "strexdeq %0, %5, %H5, [%3]" -+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) -+ : "r" (&ptr->counter), "r" (old), "r" (new) -+ : "cc"); -+ } while (res); -+ -+ smp_mb(); -+ -+ return oldval; -+} -+ - static inline long long atomic64_xchg(atomic64_t *ptr, long long new) - { - long long result; -@@ -406,20 +799,34 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new) - static inline long long atomic64_dec_if_positive(atomic64_t *v) - { - long long result; -- unsigned long tmp; -+ u64 tmp; - - smp_mb(); - - __asm__ __volatile__("@ atomic64_dec_if_positive\n" --"1: ldrexd %0, %H0, [%3]\n" --" subs %Q0, %Q0, #1\n" --" sbc %R0, %R0, #0\n" -+"1: ldrexd %1, %H1, [%3]\n" -+" subs %Q0, %Q1, #1\n" -+" sbcs %R0, %R1, #0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+" mov %Q0, %Q1\n" -+" mov %R0, %R1\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ - " teq %R0, #0\n" --" bmi 2f\n" -+" bmi 4f\n" - " strexd %1, %0, %H0, [%3]\n" - " teq %1, #0\n" - " bne 1b\n" --"2:" -+"4:\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ - : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter) - : "cc"); -@@ -442,13 +849,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) - " teq %0, %5\n" - " teqeq %H0, %H5\n" - " moveq %1, #0\n" --" beq 2f\n" -+" beq 4f\n" - " adds %Q0, %Q0, %Q6\n" --" adc %R0, %R0, %R6\n" -+" adcs %R0, %R0, %R6\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+" bvc 3f\n" -+"2: " REFCOUNT_TRAP_INSN "\n" -+"3:\n" -+#endif -+ - " strexd %2, %0, %H0, [%4]\n" - " teq %2, #0\n" - " bne 1b\n" --"2:" -+"4:\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ _ASM_EXTABLE(2b, 4b) -+#endif -+ - : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) - : "r" (&v->counter), "r" (u), "r" (a) - : "cc"); -@@ -461,10 
+880,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) - - #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) - #define atomic64_inc(v) atomic64_add(1LL, (v)) -+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v)) - #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) -+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v)) - #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) - #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) - #define atomic64_dec(v) atomic64_sub(1LL, (v)) -+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v)) - #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) - #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) -diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h -index 2f59f74..1594659 100644 ---- a/arch/arm/include/asm/barrier.h -+++ b/arch/arm/include/asm/barrier.h -@@ -63,7 +63,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h -index 75fe66b..ba3dee4 100644 ---- a/arch/arm/include/asm/cache.h -+++ b/arch/arm/include/asm/cache.h -@@ -4,8 +4,10 @@ - #ifndef __ASMARM_CACHE_H - #define __ASMARM_CACHE_H - -+#include <linux/const.h> -+ - #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - /* - * Memory returned by kmalloc() may be used for DMA, so we must make -@@ -24,5 +26,6 @@ - #endif - - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -+#define __read_only __attribute__ ((__section__(".data..read_only"))) - - #endif -diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h -index 8b8b616..d973d24 100644 ---- a/arch/arm/include/asm/cacheflush.h -+++ b/arch/arm/include/asm/cacheflush.h -@@ -116,7 +116,7 @@ struct cpu_cache_fns { - void (*dma_unmap_area)(const void *, size_t, int); - - void (*dma_flush_range)(const void *, const void *); --}; -+} __no_const; - - /* - * Select the calling method -diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h -index 5233151..87a71fa 100644 ---- a/arch/arm/include/asm/checksum.h -+++ b/arch/arm/include/asm/checksum.h -@@ -37,7 +37,19 @@ __wsum - csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); - - __wsum --csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); -+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); -+ -+static inline __wsum -+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) -+{ -+ __wsum ret; -+ pax_open_userland(); -+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr); -+ pax_close_userland(); -+ return ret; -+} -+ -+ - - /* - * Fold a partial checksum without adding pseudo headers -diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h -index df2fbba..63fe3e1 100644 ---- a/arch/arm/include/asm/cmpxchg.h -+++ b/arch/arm/include/asm/cmpxchg.h -@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size - - #define xchg(ptr,x) \ - 
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) -+#define xchg_unchecked(ptr,x) \ -+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) - - #include <asm-generic/cmpxchg-local.h> - -diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h -index 6ddbe44..b5e38b1a 100644 ---- a/arch/arm/include/asm/domain.h -+++ b/arch/arm/include/asm/domain.h -@@ -48,18 +48,37 @@ - * Domain types - */ - #define DOMAIN_NOACCESS 0 --#define DOMAIN_CLIENT 1 - #ifdef CONFIG_CPU_USE_DOMAINS -+#define DOMAIN_USERCLIENT 1 -+#define DOMAIN_KERNELCLIENT 1 - #define DOMAIN_MANAGER 3 -+#define DOMAIN_VECTORS DOMAIN_USER - #else -+ -+#ifdef CONFIG_PAX_KERNEXEC - #define DOMAIN_MANAGER 1 -+#define DOMAIN_KERNEXEC 3 -+#else -+#define DOMAIN_MANAGER 1 -+#endif -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+#define DOMAIN_USERCLIENT 0 -+#define DOMAIN_UDEREF 1 -+#define DOMAIN_VECTORS DOMAIN_KERNEL -+#else -+#define DOMAIN_USERCLIENT 1 -+#define DOMAIN_VECTORS DOMAIN_USER -+#endif -+#define DOMAIN_KERNELCLIENT 1 -+ - #endif - - #define domain_val(dom,type) ((type) << (2*(dom))) - - #ifndef __ASSEMBLY__ - --#ifdef CONFIG_CPU_USE_DOMAINS -+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) - static inline void set_domain(unsigned val) - { - asm volatile( -@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val) - isb(); - } - --#define modify_domain(dom,type) \ -- do { \ -- struct thread_info *thread = current_thread_info(); \ -- unsigned int domain = thread->cpu_domain; \ -- domain &= ~domain_val(dom, DOMAIN_MANAGER); \ -- thread->cpu_domain = domain | domain_val(dom, type); \ -- set_domain(thread->cpu_domain); \ -- } while (0) -- -+extern void modify_domain(unsigned int dom, unsigned int type); - #else - static inline void set_domain(unsigned val) { } - static inline void modify_domain(unsigned dom, unsigned type) { } -diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h -index 051b726..abc9b2b 100644 ---- a/arch/arm/include/asm/elf.h -+++ b/arch/arm/include/asm/elf.h -@@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) -+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) -+ -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x00008000UL -+ -+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) -+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10) -+#endif - - /* When the program starts, a1 contains a pointer to a function to be - registered with atexit, as per the SVR4 ABI. 
A value of 0 means we -@@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); - extern void elf_set_personality(const struct elf32_hdr *); - #define SET_PERSONALITY(ex) elf_set_personality(&(ex)) - --struct mm_struct; --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #ifdef CONFIG_MMU - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 - struct linux_binprm; -diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h -index de53547..52b9a28 100644 ---- a/arch/arm/include/asm/fncpy.h -+++ b/arch/arm/include/asm/fncpy.h -@@ -81,7 +81,9 @@ - BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \ - (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \ - \ -+ pax_open_kernel(); \ - memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \ -+ pax_close_kernel(); \ - flush_icache_range((unsigned long)(dest_buf), \ - (unsigned long)(dest_buf) + (size)); \ - \ -diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h -index 2aff798..099eb15 100644 ---- a/arch/arm/include/asm/futex.h -+++ b/arch/arm/include/asm/futex.h -@@ -45,6 +45,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) - return -EFAULT; - -+ pax_open_userland(); -+ - smp_mb(); - __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: ldrex %1, [%4]\n" -@@ -60,6 +62,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - : "cc", "memory"); - smp_mb(); - -+ pax_close_userland(); -+ - *uval = val; - return ret; - } -@@ -90,6 +94,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) - return -EFAULT; - -+ pax_open_userland(); -+ - __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " TUSER(ldr) " %1, [%4]\n" - " teq %1, %2\n" -@@ -100,6 +106,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) - : "cc", "memory"); - -+ pax_close_userland(); -+ - *uval = val; - return ret; - } -@@ -122,6 +130,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) - return -EFAULT; - - pagefault_disable(); /* implies preempt_disable() */ -+ pax_open_userland(); - - switch (op) { - case FUTEX_OP_SET: -@@ -143,6 +152,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) - ret = -ENOSYS; - } - -+ pax_close_userland(); - pagefault_enable(); /* subsumes preempt_enable() */ - - if (!ret) { -diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h -index 83eb2f7..ed77159 100644 ---- a/arch/arm/include/asm/kmap_types.h -+++ b/arch/arm/include/asm/kmap_types.h -@@ -4,6 +4,6 @@ - /* - * This is the "bare minimum". AIO seems to require this. 
- */ --#define KM_TYPE_NR 16 -+#define KM_TYPE_NR 17 - - #endif -diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h -index 9e614a1..3302cca 100644 ---- a/arch/arm/include/asm/mach/dma.h -+++ b/arch/arm/include/asm/mach/dma.h -@@ -22,7 +22,7 @@ struct dma_ops { - int (*residue)(unsigned int, dma_t *); /* optional */ - int (*setspeed)(unsigned int, dma_t *, int); /* optional */ - const char *type; --}; -+} __do_const; - - struct dma_struct { - void *addr; /* single DMA address */ -diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h -index f98c7f3..e5c626d 100644 ---- a/arch/arm/include/asm/mach/map.h -+++ b/arch/arm/include/asm/mach/map.h -@@ -23,17 +23,19 @@ struct map_desc { - - /* types 0-3 are defined in asm/io.h */ - enum { -- MT_UNCACHED = 4, -- MT_CACHECLEAN, -- MT_MINICLEAN, -+ MT_UNCACHED_RW = 4, -+ MT_CACHECLEAN_RO, -+ MT_MINICLEAN_RO, - MT_LOW_VECTORS, - MT_HIGH_VECTORS, -- MT_MEMORY_RWX, -+ __MT_MEMORY_RWX, - MT_MEMORY_RW, -- MT_ROM, -- MT_MEMORY_RWX_NONCACHED, -+ MT_MEMORY_RX, -+ MT_ROM_RX, -+ MT_MEMORY_RW_NONCACHED, -+ MT_MEMORY_RX_NONCACHED, - MT_MEMORY_RW_DTCM, -- MT_MEMORY_RWX_ITCM, -+ MT_MEMORY_RX_ITCM, - MT_MEMORY_RW_SO, - MT_MEMORY_DMA_READY, - }; -diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h -index f94784f..9a09a4a 100644 ---- a/arch/arm/include/asm/outercache.h -+++ b/arch/arm/include/asm/outercache.h -@@ -35,7 +35,7 @@ struct outer_cache_fns { - #endif - void (*set_debug)(unsigned long); - void (*resume)(void); --}; -+} __no_const; - - extern struct outer_cache_fns outer_cache; - -diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h -index 4355f0e..cd9168e 100644 ---- a/arch/arm/include/asm/page.h -+++ b/arch/arm/include/asm/page.h -@@ -23,6 +23,7 @@ - - #else - -+#include <linux/compiler.h> - #include <asm/glue.h> - - /* -@@ -114,7 +115,7 @@ struct cpu_user_fns { - void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); - void (*cpu_copy_user_highpage)(struct page *to, struct page *from, - unsigned long vaddr, struct vm_area_struct *vma); --}; -+} __no_const; - - #ifdef MULTI_USER - extern struct cpu_user_fns cpu_user; -diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h -index 78a7793..e3dc06c 100644 ---- a/arch/arm/include/asm/pgalloc.h -+++ b/arch/arm/include/asm/pgalloc.h -@@ -17,6 +17,7 @@ - #include <asm/processor.h> - #include <asm/cacheflush.h> - #include <asm/tlbflush.h> -+#include <asm/system_info.h> - - #define check_pgt_cache() do { } while (0) - -@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); - } - -+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) -+{ -+ pud_populate(mm, pud, pmd); -+} -+ - #else /* !CONFIG_ARM_LPAE */ - - /* -@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); }) - #define pmd_free(mm, pmd) do { } while (0) - #define pud_populate(mm,pmd,pte) BUG() -+#define pud_populate_kernel(mm,pmd,pte) BUG() - - #endif /* CONFIG_ARM_LPAE */ - -@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) - __free_page(pte); - } - -+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot) -+{ -+#ifdef CONFIG_ARM_LPAE -+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); -+#else -+ if (addr & SECTION_SIZE) -+ pmdp[1] = 
__pmd(pmd_val(pmdp[1]) | prot); -+ else -+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot); -+#endif -+ flush_pmd_entry(pmdp); -+} -+ - static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, - pmdval_t prot) - { -@@ -157,7 +177,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) - static inline void - pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) - { -- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); -+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask); - } - #define pmd_pgtable(pmd) pmd_page(pmd) - -diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h -index 5cfba15..f415e1a 100644 ---- a/arch/arm/include/asm/pgtable-2level-hwdef.h -+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h -@@ -20,12 +20,15 @@ - #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) - #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) - #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) -+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ - #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) - #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) - #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ -+ - /* - * - section - */ -+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ - #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) - #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) - #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ -@@ -37,6 +40,7 @@ - #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */ - #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */ - #define PMD_SECT_AF (_AT(pmdval_t, 0)) -+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0)) - - #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0)) - #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE) -@@ -66,6 +70,7 @@ - * - extended small page/tiny page - */ - #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */ -+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */ - #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4) - #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4) - #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4) -diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h -index 219ac88..73ec32a 100644 ---- a/arch/arm/include/asm/pgtable-2level.h -+++ b/arch/arm/include/asm/pgtable-2level.h -@@ -126,6 +126,9 @@ - #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ - #define L_PTE_NONE (_AT(pteval_t, 1) << 11) - -+/* Two-level page tables only have PXN in the PGD, not in the PTE. 
*/ -+#define L_PTE_PXN (_AT(pteval_t, 0)) -+ - /* - * These are the memory types, defined to be compatible with - * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB -diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h -index 9fd61c7..f8f1cff 100644 ---- a/arch/arm/include/asm/pgtable-3level-hwdef.h -+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h -@@ -76,6 +76,7 @@ - #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ - #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ - #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ -+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ - #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ - - /* -diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h -index 06e0bc0..e60c2d3 100644 ---- a/arch/arm/include/asm/pgtable-3level.h -+++ b/arch/arm/include/asm/pgtable-3level.h -@@ -81,6 +81,7 @@ - #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ - #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ - #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ -+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */ - #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ - #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) - #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) -@@ -96,6 +97,7 @@ - /* - * To be used in assembly code with the upper page attributes. - */ -+#define L_PTE_PXN_HIGH (1 << (53 - 32)) - #define L_PTE_XN_HIGH (1 << (54 - 32)) - #define L_PTE_DIRTY_HIGH (1 << (55 - 32)) - -diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h -index 89dba13..ca1cf20 100644 ---- a/arch/arm/include/asm/pgtable.h -+++ b/arch/arm/include/asm/pgtable.h -@@ -33,6 +33,9 @@ - #include <asm/pgtable-2level.h> - #endif - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - /* - * Just any arbitrary offset to the start of the vmalloc VM area: the - * current 8MB value just means that there will be a 8MB "hole" after the -@@ -48,6 +51,9 @@ - #define LIBRARY_TEXT_START 0x0c000000 - - #ifndef __ASSEMBLY__ -+extern pteval_t __supported_pte_mask; -+extern pmdval_t __supported_pmd_mask; -+ - extern void __pte_error(const char *file, int line, pte_t); - extern void __pmd_error(const char *file, int line, pmd_t); - extern void __pgd_error(const char *file, int line, pgd_t); -@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t); - #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) - #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) - -+#define __HAVE_ARCH_PAX_OPEN_KERNEL -+#define __HAVE_ARCH_PAX_CLOSE_KERNEL -+ -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+#include <asm/domain.h> -+#include <linux/thread_info.h> -+#include <linux/preempt.h> -+ -+static inline int test_domain(int domain, int domaintype) -+{ -+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype); -+} -+#endif -+ -+#ifdef CONFIG_PAX_KERNEXEC -+static inline unsigned long pax_open_kernel(void) { -+#ifdef CONFIG_ARM_LPAE -+ /* TODO */ -+#else -+ preempt_disable(); -+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC)); -+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC); -+#endif -+ return 0; -+} -+ -+static inline unsigned long pax_close_kernel(void) { -+#ifdef CONFIG_ARM_LPAE -+ /* TODO */ -+#else -+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER)); -+ /* DOMAIN_MANAGER = "client" under KERNEXEC */ -+ 
modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER); -+ preempt_enable_no_resched(); -+#endif -+ return 0; -+} -+#else -+static inline unsigned long pax_open_kernel(void) { return 0; } -+static inline unsigned long pax_close_kernel(void) { return 0; } -+#endif -+ - /* - * This is the lowest virtual address we can permit any user space - * mapping to be mapped at. This is particularly important for -@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t); - /* - * The pgprot_* and protection_map entries will be fixed up in runtime - * to include the cachable and bufferable bits based on memory policy, -- * as well as any architecture dependent bits like global/ASID and SMP -- * shared mapping bits. -+ * as well as any architecture dependent bits like global/ASID, PXN, -+ * and SMP shared mapping bits. - */ - #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG - -@@ -266,7 +314,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; } - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) - { - const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | -- L_PTE_NONE | L_PTE_VALID; -+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask; - pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); - return pte; - } -diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h -index c4ae171..ea0c0c2 100644 ---- a/arch/arm/include/asm/psci.h -+++ b/arch/arm/include/asm/psci.h -@@ -29,7 +29,7 @@ struct psci_operations { - int (*cpu_off)(struct psci_power_state state); - int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); - int (*migrate)(unsigned long cpuid); --}; -+} __no_const; - - extern struct psci_operations psci_ops; - extern struct smp_operations psci_smp_ops; -diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h -index 4157aec..375a858 100644 ---- a/arch/arm/include/asm/smp.h -+++ b/arch/arm/include/asm/smp.h -@@ -113,7 +113,7 @@ struct smp_operations { - int (*cpu_disable)(unsigned int cpu); - #endif - #endif --}; -+} __no_const; - - /* - * set platform specific SMP operations -diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h -index 3e635ee..c39f5b4 100644 ---- a/arch/arm/include/asm/thread_info.h -+++ b/arch/arm/include/asm/thread_info.h -@@ -77,9 +77,9 @@ struct thread_info { - .flags = 0, \ - .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ -- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ -- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ -- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ -+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \ -+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \ -+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ -@@ -146,7 +146,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, - #define TIF_SYSCALL_AUDIT 9 - #define TIF_SYSCALL_TRACEPOINT 10 - #define TIF_SECCOMP 11 /* seccomp syscall filtering active */ --#define TIF_NOHZ 12 /* in adaptive nohz mode */ -+/* within 8 bits of TIF_SYSCALL_TRACE -+ * to meet flexible second operand requirements -+ */ -+#define TIF_GRSEC_SETXID 12 -+#define TIF_NOHZ 13 /* in adaptive nohz mode */ - #define TIF_USING_IWMMXT 17 - #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ - #define TIF_RESTORE_SIGMASK 20 -@@ -159,10 +163,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, - #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) - #define _TIF_SECCOMP (1 << 
TIF_SECCOMP) - #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) -+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID) - - /* Checks for any syscall work in entry-common.S */ - #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ -- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) -+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) - - /* - * Change these and you break ASM code in entry-common.S -diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h -index 5f833f7..76e6644 100644 ---- a/arch/arm/include/asm/tls.h -+++ b/arch/arm/include/asm/tls.h -@@ -3,6 +3,7 @@ - - #include <linux/compiler.h> - #include <asm/thread_info.h> -+#include <asm/pgtable.h> - - #ifdef __ASSEMBLY__ - #include <asm/asm-offsets.h> -@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val) - * at 0xffff0fe0 must be used instead. (see - * entry-armv.S for details) - */ -+ pax_open_kernel(); - *((unsigned int *)0xffff0ff0) = val; -+ pax_close_kernel(); - #endif - } - -diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h -index 7f3f3cc..bdf0665 100644 ---- a/arch/arm/include/asm/uaccess.h -+++ b/arch/arm/include/asm/uaccess.h -@@ -18,6 +18,7 @@ - #include <asm/domain.h> - #include <asm/unified.h> - #include <asm/compiler.h> -+#include <asm/pgtable.h> - - #if __LINUX_ARM_ARCH__ < 6 - #include <asm-generic/uaccess-unaligned.h> -@@ -70,11 +71,38 @@ extern int __put_user_bad(void); - static inline void set_fs(mm_segment_t fs) - { - current_thread_info()->addr_limit = fs; -- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); -+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER); - } - - #define segment_eq(a,b) ((a) == (b)) - -+#define __HAVE_ARCH_PAX_OPEN_USERLAND -+#define __HAVE_ARCH_PAX_CLOSE_USERLAND -+ -+static inline void pax_open_userland(void) -+{ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (segment_eq(get_fs(), USER_DS)) { -+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF)); -+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF); -+ } -+#endif -+ -+} -+ -+static inline void pax_close_userland(void) -+{ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (segment_eq(get_fs(), USER_DS)) { -+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS)); -+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS); -+ } -+#endif -+ -+} -+ - #define __addr_ok(addr) ({ \ - unsigned long flag; \ - __asm__("cmp %2, %0; movlo %0, #0" \ -@@ -150,8 +178,12 @@ extern int __get_user_4(void *); - - #define get_user(x,p) \ - ({ \ -+ int __e; \ - might_fault(); \ -- __get_user_check(x,p); \ -+ pax_open_userland(); \ -+ __e = __get_user_check(x,p); \ -+ pax_close_userland(); \ -+ __e; \ - }) - - extern int __put_user_1(void *, unsigned int); -@@ -196,8 +228,12 @@ extern int __put_user_8(void *, unsigned long long); - - #define put_user(x,p) \ - ({ \ -+ int __e; \ - might_fault(); \ -- __put_user_check(x,p); \ -+ pax_open_userland(); \ -+ __e = __put_user_check(x,p); \ -+ pax_close_userland(); \ -+ __e; \ - }) - - #else /* CONFIG_MMU */ -@@ -221,6 +257,7 @@ static inline void set_fs(mm_segment_t fs) - - #endif /* CONFIG_MMU */ - -+#define access_ok_noprefault(type,addr,size) access_ok((type),(addr),(size)) - #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) - - #define user_addr_max() \ -@@ -238,13 +275,17 @@ static inline void set_fs(mm_segment_t fs) - #define __get_user(x,ptr) \ - ({ \ - long __gu_err = 0; \ -+ pax_open_userland(); \ - __get_user_err((x),(ptr),__gu_err); \ -+ pax_close_userland(); \ - __gu_err; \ - }) - - #define __get_user_error(x,ptr,err) \ - ({ 
\ -+ pax_open_userland(); \ - __get_user_err((x),(ptr),err); \ -+ pax_close_userland(); \ - (void) 0; \ - }) - -@@ -320,13 +361,17 @@ do { \ - #define __put_user(x,ptr) \ - ({ \ - long __pu_err = 0; \ -+ pax_open_userland(); \ - __put_user_err((x),(ptr),__pu_err); \ -+ pax_close_userland(); \ - __pu_err; \ - }) - - #define __put_user_error(x,ptr,err) \ - ({ \ -+ pax_open_userland(); \ - __put_user_err((x),(ptr),err); \ -+ pax_close_userland(); \ - (void) 0; \ - }) - -@@ -426,11 +471,44 @@ do { \ - - - #ifdef CONFIG_MMU --extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); --extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); -+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n); -+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n); -+ -+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) -+{ -+ unsigned long ret; -+ -+ check_object_size(to, n, false); -+ pax_open_userland(); -+ ret = ___copy_from_user(to, from, n); -+ pax_close_userland(); -+ return ret; -+} -+ -+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ unsigned long ret; -+ -+ check_object_size(from, n, true); -+ pax_open_userland(); -+ ret = ___copy_to_user(to, from, n); -+ pax_close_userland(); -+ return ret; -+} -+ - extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); --extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); -+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n); - extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); -+ -+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n) -+{ -+ unsigned long ret; -+ pax_open_userland(); -+ ret = ___clear_user(addr, n); -+ pax_close_userland(); -+ return ret; -+} -+ - #else - #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) - #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) -@@ -439,6 +517,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l - - static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else /* security hole - plug it */ -@@ -448,6 +529,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u - - static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; -diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h -index 5af0ed1..cea83883 100644 ---- a/arch/arm/include/uapi/asm/ptrace.h -+++ b/arch/arm/include/uapi/asm/ptrace.h -@@ -92,7 +92,7 @@ - * ARMv7 groups of PSR bits - */ - #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ --#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ -+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */ - #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ - #define PSR_ENDIAN_MASK 0x00000200 /* Endianness 
state mask */ - -diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c -index 85e664b..419a1cd 100644 ---- a/arch/arm/kernel/armksyms.c -+++ b/arch/arm/kernel/armksyms.c -@@ -55,7 +55,7 @@ EXPORT_SYMBOL(arm_delay_ops); - - /* networking */ - EXPORT_SYMBOL(csum_partial); --EXPORT_SYMBOL(csum_partial_copy_from_user); -+EXPORT_SYMBOL(__csum_partial_copy_from_user); - EXPORT_SYMBOL(csum_partial_copy_nocheck); - EXPORT_SYMBOL(__csum_ipv6_magic); - -@@ -91,9 +91,9 @@ EXPORT_SYMBOL(__memzero); - #ifdef CONFIG_MMU - EXPORT_SYMBOL(copy_page); - --EXPORT_SYMBOL(__copy_from_user); --EXPORT_SYMBOL(__copy_to_user); --EXPORT_SYMBOL(__clear_user); -+EXPORT_SYMBOL(___copy_from_user); -+EXPORT_SYMBOL(___copy_to_user); -+EXPORT_SYMBOL(___clear_user); - - EXPORT_SYMBOL(__get_user_1); - EXPORT_SYMBOL(__get_user_2); -diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S -index 1879e8d..b2207fc 100644 ---- a/arch/arm/kernel/entry-armv.S -+++ b/arch/arm/kernel/entry-armv.S -@@ -47,6 +47,87 @@ - 9997: - .endm - -+ .macro pax_enter_kernel -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ @ make aligned space for saved DACR -+ sub sp, sp, #8 -+ @ save regs -+ stmdb sp!, {r1, r2} -+ @ read DACR from cpu_domain into r1 -+ mov r2, sp -+ @ assume 8K pages, since we have to split the immediate in two -+ bic r2, r2, #(0x1fc0) -+ bic r2, r2, #(0x3f) -+ ldr r1, [r2, #TI_CPU_DOMAIN] -+ @ store old DACR on stack -+ str r1, [sp, #8] -+#ifdef CONFIG_PAX_KERNEXEC -+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT -+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) -+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) -+#endif -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ @ set current DOMAIN_USER to DOMAIN_NOACCESS -+ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) -+#endif -+ @ write r1 to current_thread_info()->cpu_domain -+ str r1, [r2, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r1, r2} -+#endif -+ .endm -+ -+ .macro pax_open_userland -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ @ save regs -+ stmdb sp!, {r0, r1} -+ @ read DACR from cpu_domain into r1 -+ mov r0, sp -+ @ assume 8K pages, since we have to split the immediate in two -+ bic r0, r0, #(0x1fc0) -+ bic r0, r0, #(0x3f) -+ ldr r1, [r0, #TI_CPU_DOMAIN] -+ @ set current DOMAIN_USER to DOMAIN_CLIENT -+ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) -+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) -+ @ write r1 to current_thread_info()->cpu_domain -+ str r1, [r0, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r0, r1} -+#endif -+ .endm -+ -+ .macro pax_close_userland -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ @ save regs -+ stmdb sp!, {r0, r1} -+ @ read DACR from cpu_domain into r1 -+ mov r0, sp -+ @ assume 8K pages, since we have to split the immediate in two -+ bic r0, r0, #(0x1fc0) -+ bic r0, r0, #(0x3f) -+ ldr r1, [r0, #TI_CPU_DOMAIN] -+ @ set current DOMAIN_USER to DOMAIN_NOACCESS -+ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) -+ @ write r1 to current_thread_info()->cpu_domain -+ str r1, [r0, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r0, r1} -+#endif -+ .endm -+ - .macro pabt_helper - @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 - #ifdef MULTI_PABORT -@@ -89,11 +170,15 @@ - * Invalid mode handlers - */ - .macro inv_entry, reason -+ 
-+ pax_enter_kernel -+ - sub sp, sp, #S_FRAME_SIZE - ARM( stmib sp, {r1 - lr} ) - THUMB( stmia sp, {r0 - r12} ) - THUMB( str sp, [sp, #S_SP] ) - THUMB( str lr, [sp, #S_LR] ) -+ - mov r1, #\reason - .endm - -@@ -149,7 +234,11 @@ ENDPROC(__und_invalid) - .macro svc_entry, stack_hole=0 - UNWIND(.fnstart ) - UNWIND(.save {r0 - pc} ) -+ -+ pax_enter_kernel -+ - sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) -+ - #ifdef CONFIG_THUMB2_KERNEL - SPFIX( str r0, [sp] ) @ temporarily saved - SPFIX( mov r0, sp ) -@@ -164,7 +253,12 @@ ENDPROC(__und_invalid) - ldmia r0, {r3 - r5} - add r7, sp, #S_SP - 4 @ here for interlock avoidance - mov r6, #-1 @ "" "" "" "" -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ @ offset sp by 8 as done in pax_enter_kernel -+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4) -+#else - add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) -+#endif - SPFIX( addeq r2, r2, #4 ) - str r3, [sp, #-4]! @ save the "real" r0 copied - @ from the exception stack -@@ -317,6 +411,9 @@ ENDPROC(__pabt_svc) - .macro usr_entry - UNWIND(.fnstart ) - UNWIND(.cantunwind ) @ don't unwind the user space -+ -+ pax_enter_kernel_user -+ - sub sp, sp, #S_FRAME_SIZE - ARM( stmib sp, {r1 - r12} ) - THUMB( stmia sp, {r0 - r12} ) -@@ -416,7 +513,9 @@ __und_usr: - tst r3, #PSR_T_BIT @ Thumb mode? - bne __und_usr_thumb - sub r4, r2, #4 @ ARM instr at LR - 4 -+ pax_open_userland - 1: ldrt r0, [r4] -+ pax_close_userland - ARM_BE8(rev r0, r0) @ little endian instruction - - @ r0 = 32-bit ARM instruction which caused the exception -@@ -450,11 +549,15 @@ __und_usr_thumb: - */ - .arch armv6t2 - #endif -+ pax_open_userland - 2: ldrht r5, [r4] -+ pax_close_userland - ARM_BE8(rev16 r5, r5) @ little endian instruction - cmp r5, #0xe800 @ 32bit instruction if xx != 0 - blo __und_usr_fault_16 @ 16bit undefined instruction -+ pax_open_userland - 3: ldrht r0, [r2] -+ pax_close_userland - ARM_BE8(rev16 r0, r0) @ little endian instruction - add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 - str r2, [sp, #S_PC] @ it's a 2x16bit instr, update -@@ -484,7 +587,8 @@ ENDPROC(__und_usr) - */ - .pushsection .fixup, "ax" - .align 2 --4: mov pc, r9 -+4: pax_close_userland -+ mov pc, r9 - .popsection - .pushsection __ex_table,"a" - .long 1b, 4b -@@ -694,7 +798,7 @@ ENTRY(__switch_to) - THUMB( str lr, [ip], #4 ) - ldr r4, [r2, #TI_TP_VALUE] - ldr r5, [r2, #TI_TP_VALUE + 4] --#ifdef CONFIG_CPU_USE_DOMAINS -+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) - ldr r6, [r2, #TI_CPU_DOMAIN] - #endif - switch_tls r1, r4, r5, r3, r7 -@@ -703,7 +807,7 @@ ENTRY(__switch_to) - ldr r8, =__stack_chk_guard - ldr r7, [r7, #TSK_STACK_CANARY] - #endif --#ifdef CONFIG_CPU_USE_DOMAINS -+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) - mcr p15, 0, r6, c3, c0, 0 @ Set domain register - #endif - mov r5, r0 -diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S -index 98dd389..e6878f2 100644 ---- a/arch/arm/kernel/entry-common.S -+++ b/arch/arm/kernel/entry-common.S -@@ -10,18 +10,46 @@ - - #include <asm/unistd.h> - #include <asm/ftrace.h> -+#include <asm/domain.h> - #include <asm/unwind.h> - -+#include "entry-header.S" -+ - #ifdef CONFIG_NEED_RET_TO_USER - #include <mach/entry-macro.S> - #else - .macro arch_ret_to_user, tmp1, tmp2 -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ @ save regs -+ stmdb sp!, {r1, r2} -+ @ read DACR from cpu_domain into r1 -+ mov r2, sp -+ @ assume 8K 
pages, since we have to split the immediate in two -+ bic r2, r2, #(0x1fc0) -+ bic r2, r2, #(0x3f) -+ ldr r1, [r2, #TI_CPU_DOMAIN] -+#ifdef CONFIG_PAX_KERNEXEC -+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT -+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) -+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) -+#endif -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ @ set current DOMAIN_USER to DOMAIN_UDEREF -+ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) -+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF)) -+#endif -+ @ write r1 to current_thread_info()->cpu_domain -+ str r1, [r2, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r1, r2} -+#endif - .endm - #endif - --#include "entry-header.S" -- -- - .align 5 - /* - * This is the fast syscall return path. We do as little as -@@ -413,6 +441,12 @@ ENTRY(vector_swi) - USER( ldr scno, [lr, #-4] ) @ get SWI instruction - #endif - -+ /* -+ * do this here to avoid a performance hit of wrapping the code above -+ * that directly dereferences userland to parse the SWI instruction -+ */ -+ pax_enter_kernel_user -+ - adr tbl, sys_call_table @ load syscall table pointer - - #if defined(CONFIG_OABI_COMPAT) -diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S -index 88c6bab..652981b 100644 ---- a/arch/arm/kernel/entry-header.S -+++ b/arch/arm/kernel/entry-header.S -@@ -188,6 +188,60 @@ - msr cpsr_c, \rtemp @ switch back to the SVC mode - .endm - -+ .macro pax_enter_kernel_user -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ @ save regs -+ stmdb sp!, {r0, r1} -+ @ read DACR from cpu_domain into r1 -+ mov r0, sp -+ @ assume 8K pages, since we have to split the immediate in two -+ bic r0, r0, #(0x1fc0) -+ bic r0, r0, #(0x3f) -+ ldr r1, [r0, #TI_CPU_DOMAIN] -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ @ set current DOMAIN_USER to DOMAIN_NOACCESS -+ bic r1, r1, #(domain_val(DOMAIN_USER, 3)) -+#endif -+#ifdef CONFIG_PAX_KERNEXEC -+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT -+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3)) -+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT)) -+#endif -+ @ write r1 to current_thread_info()->cpu_domain -+ str r1, [r0, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r0, r1} -+#endif -+ .endm -+ -+ .macro pax_exit_kernel -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ @ save regs -+ stmdb sp!, {r0, r1} -+ @ read old DACR from stack into r1 -+ ldr r1, [sp, #(8 + S_SP)] -+ sub r1, r1, #8 -+ ldr r1, [r1] -+ -+ @ write r1 to current_thread_info()->cpu_domain -+ mov r0, sp -+ @ assume 8K pages, since we have to split the immediate in two -+ bic r0, r0, #(0x1fc0) -+ bic r0, r0, #(0x3f) -+ str r1, [r0, #TI_CPU_DOMAIN] -+ @ write r1 to DACR -+ mcr p15, 0, r1, c3, c0, 0 -+ @ instruction sync -+ instr_sync -+ @ restore regs -+ ldmia sp!, {r0, r1} -+#endif -+ .endm -+ - #ifndef CONFIG_THUMB2_KERNEL - .macro svc_exit, rpsr, irq = 0 - .if \irq != 0 -@@ -207,6 +261,9 @@ - blne trace_hardirqs_off - #endif - .endif -+ -+ pax_exit_kernel -+ - msr spsr_cxsf, \rpsr - #if defined(CONFIG_CPU_V6) - ldr r0, [sp] -@@ -270,6 +327,9 @@ - blne trace_hardirqs_off - #endif - .endif -+ -+ pax_exit_kernel -+ - ldr lr, [sp, #S_SP] @ top of the stack - ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - clrex @ clear the exclusive monitor -diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c -index 
918875d..cd5fa27 100644 ---- a/arch/arm/kernel/fiq.c -+++ b/arch/arm/kernel/fiq.c -@@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length) - void *base = vectors_page; - unsigned offset = FIQ_OFFSET; - -+ pax_open_kernel(); - memcpy(base + offset, start, length); -+ pax_close_kernel(); -+ - if (!cache_is_vipt_nonaliasing()) - flush_icache_range((unsigned long)base + offset, offset + - length); -diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S -index f5f381d..a6f36a1 100644 ---- a/arch/arm/kernel/head.S -+++ b/arch/arm/kernel/head.S -@@ -437,7 +437,7 @@ __enable_mmu: - mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ - domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ -- domain_val(DOMAIN_IO, DOMAIN_CLIENT)) -+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT)) - mcr p15, 0, r5, c3, c0, 0 @ load domain access register - mcr p15, 0, r4, c2, c0, 0 @ load page table pointer - #endif -diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c -index 45e4781..8eac93d 100644 ---- a/arch/arm/kernel/module.c -+++ b/arch/arm/kernel/module.c -@@ -38,12 +38,39 @@ - #endif - - #ifdef CONFIG_MMU --void *module_alloc(unsigned long size) -+static inline void *__module_alloc(unsigned long size, pgprot_t prot) - { -+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR) -+ return NULL; - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, -- GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, -+ GFP_KERNEL, prot, NUMA_NO_NODE, - __builtin_return_address(0)); - } -+ -+void *module_alloc(unsigned long size) -+{ -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ return __module_alloc(size, PAGE_KERNEL); -+#else -+ return __module_alloc(size, PAGE_KERNEL_EXEC); -+#endif -+ -+} -+ -+#ifdef CONFIG_PAX_KERNEXEC -+void module_free_exec(struct module *mod, void *module_region) -+{ -+ module_free(mod, module_region); -+} -+EXPORT_SYMBOL(module_free_exec); -+ -+void *module_alloc_exec(unsigned long size) -+{ -+ return __module_alloc(size, PAGE_KERNEL_EXEC); -+} -+EXPORT_SYMBOL(module_alloc_exec); -+#endif - #endif - - int -diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c -index 07314af..c46655c 100644 ---- a/arch/arm/kernel/patch.c -+++ b/arch/arm/kernel/patch.c -@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) - bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); - int size; - -+ pax_open_kernel(); - if (thumb2 && __opcode_is_thumb16(insn)) { - *(u16 *)addr = __opcode_to_mem_thumb16(insn); - size = sizeof(u16); -@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn) - *(u32 *)addr = insn; - size = sizeof(u32); - } -+ pax_close_kernel(); - - flush_icache_range((uintptr_t)(addr), - (uintptr_t)(addr) + size); -diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c -index 5f6e650..b5e6630 100644 ---- a/arch/arm/kernel/process.c -+++ b/arch/arm/kernel/process.c -@@ -217,6 +217,7 @@ void machine_power_off(void) - - if (pm_power_off) - pm_power_off(); -+ BUG(); - } - - /* -@@ -230,7 +231,7 @@ void machine_power_off(void) - * executing pre-reset code, and using RAM that the primary CPU's code wishes - * to use. Implementing such co-ordination would be essentially impossible. 
- */ --void machine_restart(char *cmd) -+__noreturn void machine_restart(char *cmd) - { - local_irq_disable(); - smp_send_stop(); -@@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs) - - show_regs_print_info(KERN_DEFAULT); - -- print_symbol("PC is at %s\n", instruction_pointer(regs)); -- print_symbol("LR is at %s\n", regs->ARM_lr); -+ printk("PC is at %pA\n", (void *)instruction_pointer(regs)); -+ printk("LR is at %pA\n", (void *)regs->ARM_lr); - printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" - "sp : %08lx ip : %08lx fp : %08lx\n", - regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr, -@@ -427,12 +428,6 @@ unsigned long get_wchan(struct task_struct *p) - return 0; - } - --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long range_end = mm->brk + 0x02000000; -- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; --} -- - #ifdef CONFIG_MMU - #ifdef CONFIG_KUSER_HELPERS - /* -@@ -448,7 +443,7 @@ static struct vm_area_struct gate_vma = { - - static int __init gate_vma_init(void) - { -- gate_vma.vm_page_prot = PAGE_READONLY_EXEC; -+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); - return 0; - } - arch_initcall(gate_vma_init); -@@ -474,41 +469,16 @@ int in_gate_area_no_mm(unsigned long addr) - - const char *arch_vma_name(struct vm_area_struct *vma) - { -- return is_gate_vma(vma) ? "[vectors]" : -- (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? -- "[sigpage]" : NULL; -+ return is_gate_vma(vma) ? "[vectors]" : NULL; - } - --static struct page *signal_page; --extern struct page *get_signal_page(void); -- - int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - { - struct mm_struct *mm = current->mm; -- unsigned long addr; -- int ret; -- -- if (!signal_page) -- signal_page = get_signal_page(); -- if (!signal_page) -- return -ENOMEM; - - down_write(&mm->mmap_sem); -- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); -- if (IS_ERR_VALUE(addr)) { -- ret = addr; -- goto up_fail; -- } -- -- ret = install_special_mapping(mm, addr, PAGE_SIZE, -- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, -- &signal_page); -- -- if (ret == 0) -- mm->context.sigpage = addr; -- -- up_fail: -+ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC; - up_write(&mm->mmap_sem); -- return ret; -+ return 0; - } - #endif -diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c -index 4693188..4596c5e 100644 ---- a/arch/arm/kernel/psci.c -+++ b/arch/arm/kernel/psci.c -@@ -24,7 +24,7 @@ - #include <asm/opcodes-virt.h> - #include <asm/psci.h> - --struct psci_operations psci_ops; -+struct psci_operations psci_ops __read_only; - - static int (*invoke_psci_fn)(u32, u32, u32, u32); - -diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c -index 0dd3b79..b67388e 100644 ---- a/arch/arm/kernel/ptrace.c -+++ b/arch/arm/kernel/ptrace.c -@@ -908,7 +908,7 @@ enum ptrace_syscall_dir { - PTRACE_SYSCALL_EXIT, - }; - --static int tracehook_report_syscall(struct pt_regs *regs, -+static void tracehook_report_syscall(struct pt_regs *regs, - enum ptrace_syscall_dir dir) - { - unsigned long ip; -@@ -926,19 +926,29 @@ static int tracehook_report_syscall(struct pt_regs *regs, - current_thread_info()->syscall = -1; - - regs->ARM_ip = ip; -- return current_thread_info()->syscall; - } - -+#ifdef CONFIG_GRKERNSEC_SETXID -+extern void gr_delayed_cred_worker(void); -+#endif -+ - asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) - { - current_thread_info()->syscall = scno; - -+#ifdef 
CONFIG_GRKERNSEC_SETXID -+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) -+ gr_delayed_cred_worker(); -+#endif -+ - /* Do the secure computing check first; failures should be fast. */ - if (secure_computing(scno) == -1) - return -1; - - if (test_thread_flag(TIF_SYSCALL_TRACE)) -- scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); -+ tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); -+ -+ scno = current_thread_info()->syscall; - - if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) - trace_sys_enter(regs, scno); -diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c -index aab70f6..bd2751b 100644 ---- a/arch/arm/kernel/setup.c -+++ b/arch/arm/kernel/setup.c -@@ -100,21 +100,23 @@ EXPORT_SYMBOL(system_serial_high); - unsigned int elf_hwcap __read_mostly; - EXPORT_SYMBOL(elf_hwcap); - -+pteval_t __supported_pte_mask __read_only; -+pmdval_t __supported_pmd_mask __read_only; - - #ifdef MULTI_CPU --struct processor processor __read_mostly; -+struct processor processor __read_only; - #endif - #ifdef MULTI_TLB --struct cpu_tlb_fns cpu_tlb __read_mostly; -+struct cpu_tlb_fns cpu_tlb __read_only; - #endif - #ifdef MULTI_USER --struct cpu_user_fns cpu_user __read_mostly; -+struct cpu_user_fns cpu_user __read_only; - #endif - #ifdef MULTI_CACHE --struct cpu_cache_fns cpu_cache __read_mostly; -+struct cpu_cache_fns cpu_cache __read_only; - #endif - #ifdef CONFIG_OUTER_CACHE --struct outer_cache_fns outer_cache __read_mostly; -+struct outer_cache_fns outer_cache __read_only; - EXPORT_SYMBOL(outer_cache); - #endif - -@@ -247,9 +249,13 @@ static int __get_cpu_architecture(void) - asm("mrc p15, 0, %0, c0, c1, 4" - : "=r" (mmfr0)); - if ((mmfr0 & 0x0000000f) >= 0x00000003 || -- (mmfr0 & 0x000000f0) >= 0x00000030) -+ (mmfr0 & 0x000000f0) >= 0x00000030) { - cpu_arch = CPU_ARCH_ARMv7; -- else if ((mmfr0 & 0x0000000f) == 0x00000002 || -+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) { -+ __supported_pte_mask |= L_PTE_PXN; -+ __supported_pmd_mask |= PMD_PXNTABLE; -+ } -+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 || - (mmfr0 & 0x000000f0) == 0x00000020) - cpu_arch = CPU_ARCH_ARMv6; - else -diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c -index 04d6388..5115238 100644 ---- a/arch/arm/kernel/signal.c -+++ b/arch/arm/kernel/signal.c -@@ -23,8 +23,6 @@ - - extern const unsigned long sigreturn_codes[7]; - --static unsigned long signal_return_offset; -- - #ifdef CONFIG_CRUNCH - static int preserve_crunch_context(struct crunch_sigframe __user *frame) - { -@@ -395,8 +393,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig, - * except when the MPU has protected the vectors - * page from PL0 - */ -- retcode = mm->context.sigpage + signal_return_offset + -- (idx << 2) + thumb; -+ retcode = mm->context.sigpage + (idx << 2) + thumb; - } else - #endif - { -@@ -600,33 +597,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) - } while (thread_flags & _TIF_WORK_MASK); - return 0; - } -- --struct page *get_signal_page(void) --{ -- unsigned long ptr; -- unsigned offset; -- struct page *page; -- void *addr; -- -- page = alloc_pages(GFP_KERNEL, 0); -- -- if (!page) -- return NULL; -- -- addr = page_address(page); -- -- /* Give the signal return code some randomness */ -- offset = 0x200 + (get_random_int() & 0x7fc); -- signal_return_offset = offset; -- -- /* -- * Copy signal return handlers into the vector page, and -- * set sigreturn to be a pointer to these. 
-- */ -- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes)); -- -- ptr = (unsigned long)addr + offset; -- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes)); -- -- return page; --} -diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c -index 8cd3724..ea86e94 100644 ---- a/arch/arm/kernel/smp.c -+++ b/arch/arm/kernel/smp.c -@@ -73,7 +73,7 @@ enum ipi_msg_type { - - static DECLARE_COMPLETION(cpu_running); - --static struct smp_operations smp_ops; -+static struct smp_operations smp_ops __read_only; - - void __init smp_set_ops(struct smp_operations *ops) - { -diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c -index 7a3be1d..b00c7de 100644 ---- a/arch/arm/kernel/tcm.c -+++ b/arch/arm/kernel/tcm.c -@@ -61,7 +61,7 @@ static struct map_desc itcm_iomap[] __initdata = { - .virtual = ITCM_OFFSET, - .pfn = __phys_to_pfn(ITCM_OFFSET), - .length = 0, -- .type = MT_MEMORY_RWX_ITCM, -+ .type = MT_MEMORY_RX_ITCM, - } - }; - -@@ -267,7 +267,9 @@ no_dtcm: - start = &__sitcm_text; - end = &__eitcm_text; - ram = &__itcm_start; -+ pax_open_kernel(); - memcpy(start, ram, itcm_code_sz); -+ pax_close_kernel(); - pr_debug("CPU ITCM: copied code from %p - %p\n", - start, end); - itcm_present = true; -diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c -index 3f31443..ae30fc0 100644 ---- a/arch/arm/kernel/traps.c -+++ b/arch/arm/kernel/traps.c -@@ -62,7 +62,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long); - void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame) - { - #ifdef CONFIG_KALLSYMS -- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from); -+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from); - #else - printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from); - #endif -@@ -264,6 +264,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; - static int die_owner = -1; - static unsigned int die_nest_count; - -+extern void gr_handle_kernel_exploit(void); -+ - static unsigned long oops_begin(void) - { - int cpu; -@@ -306,6 +308,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); -+ -+ gr_handle_kernel_exploit(); -+ - if (signr) - do_exit(signr); - } -@@ -857,7 +862,11 @@ void __init early_trap_init(void *vectors_base) - kuser_init(vectors_base); - - flush_icache_range(vectors, vectors + PAGE_SIZE * 2); -- modify_domain(DOMAIN_USER, DOMAIN_CLIENT); -+ -+#ifndef CONFIG_PAX_MEMORY_UDEREF -+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT); -+#endif -+ - #else /* ifndef CONFIG_CPU_V7M */ - /* - * on V7-M there is no need to copy the vector table to a dedicated -diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S -index 7bcee5c..e2f3249 100644 ---- a/arch/arm/kernel/vmlinux.lds.S -+++ b/arch/arm/kernel/vmlinux.lds.S -@@ -8,7 +8,11 @@ - #include <asm/thread_info.h> - #include <asm/memory.h> - #include <asm/page.h> -- -+ -+#ifdef CONFIG_PAX_KERNEXEC -+#include <asm/pgtable.h> -+#endif -+ - #define PROC_INFO \ - . 
= ALIGN(4); \ - VMLINUX_SYMBOL(__proc_info_begin) = .; \ -@@ -34,7 +38,7 @@ - #endif - - #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \ -- defined(CONFIG_GENERIC_BUG) -+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT) - #define ARM_EXIT_KEEP(x) x - #define ARM_EXIT_DISCARD(x) - #else -@@ -90,6 +94,11 @@ SECTIONS - _text = .; - HEAD_TEXT - } -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ . = ALIGN(1<<SECTION_SHIFT); -+#endif -+ - .text : { /* Real text segment */ - _stext = .; /* Text and read-only data */ - __exception_text_start = .; -@@ -112,6 +121,8 @@ SECTIONS - ARM_CPU_KEEP(PROC_INFO) - } - -+ _etext = .; /* End of text section */ -+ - RO_DATA(PAGE_SIZE) - - . = ALIGN(4); -@@ -142,7 +153,9 @@ SECTIONS - - NOTES - -- _etext = .; /* End of text and rodata section */ -+#ifdef CONFIG_PAX_KERNEXEC -+ . = ALIGN(1<<SECTION_SHIFT); -+#endif - - #ifndef CONFIG_XIP_KERNEL - . = ALIGN(PAGE_SIZE); -@@ -220,6 +233,11 @@ SECTIONS - . = PAGE_OFFSET + TEXT_OFFSET; - #else - __init_end = .; -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ . = ALIGN(1<<SECTION_SHIFT); -+#endif -+ - . = ALIGN(THREAD_SIZE); - __data_loc = .; - #endif -diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c -index f6a52a2..f662d45 100644 ---- a/arch/arm/kvm/arm.c -+++ b/arch/arm/kvm/arm.c -@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors; - static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); - - /* The VMID used in the VTTBR */ --static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -+static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1); - static u8 kvm_next_vmid; - static DEFINE_SPINLOCK(kvm_vmid_lock); - -@@ -376,7 +376,7 @@ void force_vm_exit(const cpumask_t *mask) - */ - static bool need_new_vmid_gen(struct kvm *kvm) - { -- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); -+ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen)); - } - - /** -@@ -409,7 +409,7 @@ static void update_vttbr(struct kvm *kvm) - - /* First user of a new VMID generation? */ - if (unlikely(kvm_next_vmid == 0)) { -- atomic64_inc(&kvm_vmid_gen); -+ atomic64_inc_unchecked(&kvm_vmid_gen); - kvm_next_vmid = 1; - - /* -@@ -426,7 +426,7 @@ static void update_vttbr(struct kvm *kvm) - kvm_call_hyp(__kvm_flush_vm_context); - } - -- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); -+ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen); - kvm->arch.vmid = kvm_next_vmid; - kvm_next_vmid++; - -@@ -1022,7 +1022,7 @@ static void check_kvm_target_cpu(void *ret) - /** - * Initialize Hyp-mode and memory mappings on all CPUs. 
- */ --int kvm_arch_init(void *opaque) -+int kvm_arch_init(const void *opaque) - { - int err; - int ret, cpu; -diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S -index 14a0d98..7771a7d 100644 ---- a/arch/arm/lib/clear_user.S -+++ b/arch/arm/lib/clear_user.S -@@ -12,14 +12,14 @@ - - .text - --/* Prototype: int __clear_user(void *addr, size_t sz) -+/* Prototype: int ___clear_user(void *addr, size_t sz) - * Purpose : clear some user memory - * Params : addr - user memory address to clear - * : sz - number of bytes to clear - * Returns : number of bytes NOT cleared - */ - ENTRY(__clear_user_std) --WEAK(__clear_user) -+WEAK(___clear_user) - stmfd sp!, {r1, lr} - mov r2, #0 - cmp r1, #4 -@@ -44,7 +44,7 @@ WEAK(__clear_user) - USER( strnebt r2, [r0]) - mov r0, #0 - ldmfd sp!, {r1, pc} --ENDPROC(__clear_user) -+ENDPROC(___clear_user) - ENDPROC(__clear_user_std) - - .pushsection .fixup,"ax" -diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S -index 66a477a..bee61d3 100644 ---- a/arch/arm/lib/copy_from_user.S -+++ b/arch/arm/lib/copy_from_user.S -@@ -16,7 +16,7 @@ - /* - * Prototype: - * -- * size_t __copy_from_user(void *to, const void *from, size_t n) -+ * size_t ___copy_from_user(void *to, const void *from, size_t n) - * - * Purpose: - * -@@ -84,11 +84,11 @@ - - .text - --ENTRY(__copy_from_user) -+ENTRY(___copy_from_user) - - #include "copy_template.S" - --ENDPROC(__copy_from_user) -+ENDPROC(___copy_from_user) - - .pushsection .fixup,"ax" - .align 0 -diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S -index 6ee2f67..d1cce76 100644 ---- a/arch/arm/lib/copy_page.S -+++ b/arch/arm/lib/copy_page.S -@@ -10,6 +10,7 @@ - * ASM optimised string functions - */ - #include <linux/linkage.h> -+#include <linux/const.h> - #include <asm/assembler.h> - #include <asm/asm-offsets.h> - #include <asm/cache.h> -diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S -index d066df6..df28194 100644 ---- a/arch/arm/lib/copy_to_user.S -+++ b/arch/arm/lib/copy_to_user.S -@@ -16,7 +16,7 @@ - /* - * Prototype: - * -- * size_t __copy_to_user(void *to, const void *from, size_t n) -+ * size_t ___copy_to_user(void *to, const void *from, size_t n) - * - * Purpose: - * -@@ -88,11 +88,11 @@ - .text - - ENTRY(__copy_to_user_std) --WEAK(__copy_to_user) -+WEAK(___copy_to_user) - - #include "copy_template.S" - --ENDPROC(__copy_to_user) -+ENDPROC(___copy_to_user) - ENDPROC(__copy_to_user_std) - - .pushsection .fixup,"ax" -diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S -index 7d08b43..f7ca7ea 100644 ---- a/arch/arm/lib/csumpartialcopyuser.S -+++ b/arch/arm/lib/csumpartialcopyuser.S -@@ -57,8 +57,8 @@ - * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT - */ - --#define FN_ENTRY ENTRY(csum_partial_copy_from_user) --#define FN_EXIT ENDPROC(csum_partial_copy_from_user) -+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user) -+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user) - - #include "csumpartialcopygeneric.S" - -diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c -index 5306de3..aed6d03 100644 ---- a/arch/arm/lib/delay.c -+++ b/arch/arm/lib/delay.c -@@ -28,7 +28,7 @@ - /* - * Default to the loop-based delay implementation. 
- */ --struct arm_delay_ops arm_delay_ops = { -+struct arm_delay_ops arm_delay_ops __read_only = { - .delay = __loop_delay, - .const_udelay = __loop_const_udelay, - .udelay = __loop_udelay, -diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c -index 3e58d71..029817c 100644 ---- a/arch/arm/lib/uaccess_with_memcpy.c -+++ b/arch/arm/lib/uaccess_with_memcpy.c -@@ -136,7 +136,7 @@ out: - } - - unsigned long --__copy_to_user(void __user *to, const void *from, unsigned long n) -+___copy_to_user(void __user *to, const void *from, unsigned long n) - { - /* - * This test is stubbed out of the main function above to keep -@@ -190,7 +190,7 @@ out: - return n; - } - --unsigned long __clear_user(void __user *addr, unsigned long n) -+unsigned long ___clear_user(void __user *addr, unsigned long n) - { - /* See rational for this in __copy_to_user() above. */ - if (n < 64) -diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c -index f7ca97b..3d7e719 100644 ---- a/arch/arm/mach-at91/setup.c -+++ b/arch/arm/mach-at91/setup.c -@@ -81,7 +81,7 @@ void __init at91_init_sram(int bank, unsigned long base, unsigned int length) - - desc->pfn = __phys_to_pfn(base); - desc->length = length; -- desc->type = MT_MEMORY_RWX_NONCACHED; -+ desc->type = MT_MEMORY_RW_NONCACHED; - - pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n", - base, length, desc->virtual); -diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c -index f3407a5..bd4256f 100644 ---- a/arch/arm/mach-kirkwood/common.c -+++ b/arch/arm/mach-kirkwood/common.c -@@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw) - clk_gate_ops.disable(hw); - } - --static struct clk_ops clk_gate_fn_ops; -+static int clk_gate_fn_is_enabled(struct clk_hw *hw) -+{ -+ return clk_gate_ops.is_enabled(hw); -+} -+ -+static struct clk_ops clk_gate_fn_ops = { -+ .enable = clk_gate_fn_enable, -+ .disable = clk_gate_fn_disable, -+ .is_enabled = clk_gate_fn_is_enabled, -+}; - - static struct clk __init *clk_register_gate_fn(struct device *dev, - const char *name, -@@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev, - gate_fn->fn_en = fn_en; - gate_fn->fn_dis = fn_dis; - -- /* ops is the gate ops, but with our enable/disable functions */ -- if (clk_gate_fn_ops.enable != clk_gate_fn_enable || -- clk_gate_fn_ops.disable != clk_gate_fn_disable) { -- clk_gate_fn_ops = clk_gate_ops; -- clk_gate_fn_ops.enable = clk_gate_fn_enable; -- clk_gate_fn_ops.disable = clk_gate_fn_disable; -- } -- - clk = clk_register(dev, &gate_fn->gate.hw); - - if (IS_ERR(clk)) -diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c -index aead77a..a2253fa 100644 ---- a/arch/arm/mach-omap2/board-n8x0.c -+++ b/arch/arm/mach-omap2/board-n8x0.c -@@ -568,7 +568,7 @@ static int n8x0_menelaus_late_init(struct device *dev) - } - #endif - --static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = { -+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = { - .late_init = n8x0_menelaus_late_init, - }; - -diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c -index ab43755..ccfa231 100644 ---- a/arch/arm/mach-omap2/gpmc.c -+++ b/arch/arm/mach-omap2/gpmc.c -@@ -148,7 +148,6 @@ struct omap3_gpmc_regs { - }; - - static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ]; --static struct irq_chip gpmc_irq_chip; - static int gpmc_irq_start; - - static struct resource gpmc_mem_root; -@@ -716,6 +715,18 @@ static void 
gpmc_irq_noop(struct irq_data *data) { } - - static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; } - -+static struct irq_chip gpmc_irq_chip = { -+ .name = "gpmc", -+ .irq_startup = gpmc_irq_noop_ret, -+ .irq_enable = gpmc_irq_enable, -+ .irq_disable = gpmc_irq_disable, -+ .irq_shutdown = gpmc_irq_noop, -+ .irq_ack = gpmc_irq_noop, -+ .irq_mask = gpmc_irq_noop, -+ .irq_unmask = gpmc_irq_noop, -+ -+}; -+ - static int gpmc_setup_irq(void) - { - int i; -@@ -730,15 +741,6 @@ static int gpmc_setup_irq(void) - return gpmc_irq_start; - } - -- gpmc_irq_chip.name = "gpmc"; -- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret; -- gpmc_irq_chip.irq_enable = gpmc_irq_enable; -- gpmc_irq_chip.irq_disable = gpmc_irq_disable; -- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop; -- gpmc_irq_chip.irq_ack = gpmc_irq_noop; -- gpmc_irq_chip.irq_mask = gpmc_irq_noop; -- gpmc_irq_chip.irq_unmask = gpmc_irq_noop; -- - gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE; - gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT; - -diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c -index 667915d..2ee1219 100644 ---- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c -+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c -@@ -84,7 +84,7 @@ struct cpu_pm_ops { - int (*finish_suspend)(unsigned long cpu_state); - void (*resume)(void); - void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state); --}; -+} __no_const; - - static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info); - static struct powerdomain *mpuss_pd; -@@ -102,7 +102,7 @@ static void dummy_cpu_resume(void) - static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state) - {} - --struct cpu_pm_ops omap_pm_ops = { -+static struct cpu_pm_ops omap_pm_ops __read_only = { - .finish_suspend = default_finish_suspend, - .resume = dummy_cpu_resume, - .scu_prepare = dummy_scu_prepare, -diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c -index 3664562..72f85c6 100644 ---- a/arch/arm/mach-omap2/omap-wakeupgen.c -+++ b/arch/arm/mach-omap2/omap-wakeupgen.c -@@ -343,7 +343,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self, - return NOTIFY_OK; - } - --static struct notifier_block __refdata irq_hotplug_notifier = { -+static struct notifier_block irq_hotplug_notifier = { - .notifier_call = irq_cpu_hotplug_notify, - }; - -diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c -index 01ef59d..32ae28a8 100644 ---- a/arch/arm/mach-omap2/omap_device.c -+++ b/arch/arm/mach-omap2/omap_device.c -@@ -510,7 +510,7 @@ void omap_device_delete(struct omap_device *od) - struct platform_device __init *omap_device_build(const char *pdev_name, - int pdev_id, - struct omap_hwmod *oh, -- void *pdata, int pdata_len) -+ const void *pdata, int pdata_len) - { - struct omap_hwmod *ohs[] = { oh }; - -@@ -538,7 +538,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name, - struct platform_device __init *omap_device_build_ss(const char *pdev_name, - int pdev_id, - struct omap_hwmod **ohs, -- int oh_cnt, void *pdata, -+ int oh_cnt, const void *pdata, - int pdata_len) - { - int ret = -ENOMEM; -diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h -index 78c02b3..c94109a 100644 ---- a/arch/arm/mach-omap2/omap_device.h -+++ b/arch/arm/mach-omap2/omap_device.h -@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev); - /* Core code interface */ - - struct platform_device 
*omap_device_build(const char *pdev_name, int pdev_id, -- struct omap_hwmod *oh, void *pdata, -+ struct omap_hwmod *oh, const void *pdata, - int pdata_len); - - struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id, - struct omap_hwmod **oh, int oh_cnt, -- void *pdata, int pdata_len); -+ const void *pdata, int pdata_len); - - struct omap_device *omap_device_alloc(struct platform_device *pdev, - struct omap_hwmod **ohs, int oh_cnt); -diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c -index 399af1e..ead318a5 100644 ---- a/arch/arm/mach-omap2/omap_hwmod.c -+++ b/arch/arm/mach-omap2/omap_hwmod.c -@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops { - int (*init_clkdm)(struct omap_hwmod *oh); - void (*update_context_lost)(struct omap_hwmod *oh); - int (*get_context_lost)(struct omap_hwmod *oh); --}; -+} __no_const; - - /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */ --static struct omap_hwmod_soc_ops soc_ops; -+static struct omap_hwmod_soc_ops soc_ops __read_only; - - /* omap_hwmod_list contains all registered struct omap_hwmods */ - static LIST_HEAD(omap_hwmod_list); -diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c -index 95fee54..cfa9cf1 100644 ---- a/arch/arm/mach-omap2/powerdomains43xx_data.c -+++ b/arch/arm/mach-omap2/powerdomains43xx_data.c -@@ -10,6 +10,7 @@ - - #include <linux/kernel.h> - #include <linux/init.h> -+#include <asm/pgtable.h> - - #include "powerdomain.h" - -@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void) - - void __init am43xx_powerdomains_init(void) - { -- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; -+ pax_open_kernel(); -+ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp; -+ pax_close_kernel(); - pwrdm_register_platform_funcs(&omap4_pwrdm_operations); - pwrdm_register_pwrdms(powerdomains_am43xx); - pwrdm_complete_init(); -diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c -index d15c7bb..b2d1f0c 100644 ---- a/arch/arm/mach-omap2/wd_timer.c -+++ b/arch/arm/mach-omap2/wd_timer.c -@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void) - struct omap_hwmod *oh; - char *oh_name = "wd_timer2"; - char *dev_name = "omap_wdt"; -- struct omap_wd_timer_platform_data pdata; -+ static struct omap_wd_timer_platform_data pdata = { -+ .read_reset_sources = prm_read_reset_sources -+ }; - - if (!cpu_class_is_omap2() || of_have_populated_dt()) - return 0; -@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void) - return -EINVAL; - } - -- pdata.read_reset_sources = prm_read_reset_sources; -- - pdev = omap_device_build(dev_name, id, oh, &pdata, - sizeof(struct omap_wd_timer_platform_data)); - WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n", -diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c -index b82dcae..44ee5b6 100644 ---- a/arch/arm/mach-tegra/cpuidle-tegra20.c -+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c -@@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, - bool entered_lp2 = false; - - if (tegra_pending_sgi()) -- ACCESS_ONCE(abort_flag) = true; -+ ACCESS_ONCE_RW(abort_flag) = true; - - cpuidle_coupled_parallel_barrier(dev, &abort_barrier); - -diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h -index 2dea8b5..6499da2 100644 ---- a/arch/arm/mach-ux500/setup.h -+++ b/arch/arm/mach-ux500/setup.h -@@ -33,13 +33,6 @@ extern void ux500_timer_init(void); - .type = MT_DEVICE, \ - } - 
--#define __MEM_DEV_DESC(x, sz) { \ -- .virtual = IO_ADDRESS(x), \ -- .pfn = __phys_to_pfn(x), \ -- .length = sz, \ -- .type = MT_MEMORY_RWX, \ --} -- - extern struct smp_operations ux500_smp_ops; - extern void ux500_cpu_die(unsigned int cpu); - -diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig -index e9c290c..d0e3d41 100644 ---- a/arch/arm/mm/Kconfig -+++ b/arch/arm/mm/Kconfig -@@ -446,6 +446,7 @@ config CPU_32v5 - - config CPU_32v6 - bool -+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF - select TLS_REG_EMUL if !CPU_32v6K && !MMU - - config CPU_32v6K -@@ -600,6 +601,7 @@ config CPU_CP15_MPU - - config CPU_USE_DOMAINS - bool -+ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF - help - This option enables or disables the use of domain switching - via the set_fs() function. -@@ -798,7 +800,7 @@ config NEED_KUSER_HELPERS - - config KUSER_HELPERS - bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS -- depends on MMU -+ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND) - default y - help - Warning: disabling this option may break user programs. -@@ -812,7 +814,7 @@ config KUSER_HELPERS - See Documentation/arm/kernel_user_helpers.txt for details. - - However, the fixed address nature of these helpers can be used -- by ROP (return orientated programming) authors when creating -+ by ROP (Return Oriented Programming) authors when creating - exploits. - - If all of the binaries and libraries which run on your platform -diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c -index d301662..a6ef72c 100644 ---- a/arch/arm/mm/alignment.c -+++ b/arch/arm/mm/alignment.c -@@ -213,10 +213,12 @@ union offset_union { - #define __get16_unaligned_check(ins,val,addr) \ - do { \ - unsigned int err = 0, v, a = addr; \ -+ pax_open_userland(); \ - __get8_unaligned_check(ins,v,a,err); \ - val = v << ((BE) ? 8 : 0); \ - __get8_unaligned_check(ins,v,a,err); \ - val |= v << ((BE) ? 0 : 8); \ -+ pax_close_userland(); \ - if (err) \ - goto fault; \ - } while (0) -@@ -230,6 +232,7 @@ union offset_union { - #define __get32_unaligned_check(ins,val,addr) \ - do { \ - unsigned int err = 0, v, a = addr; \ -+ pax_open_userland(); \ - __get8_unaligned_check(ins,v,a,err); \ - val = v << ((BE) ? 24 : 0); \ - __get8_unaligned_check(ins,v,a,err); \ -@@ -238,6 +241,7 @@ union offset_union { - val |= v << ((BE) ? 8 : 16); \ - __get8_unaligned_check(ins,v,a,err); \ - val |= v << ((BE) ? 
0 : 24); \ -+ pax_close_userland(); \ - if (err) \ - goto fault; \ - } while (0) -@@ -251,6 +255,7 @@ union offset_union { - #define __put16_unaligned_check(ins,val,addr) \ - do { \ - unsigned int err = 0, v = val, a = addr; \ -+ pax_open_userland(); \ - __asm__( FIRST_BYTE_16 \ - ARM( "1: "ins" %1, [%2], #1\n" ) \ - THUMB( "1: "ins" %1, [%2]\n" ) \ -@@ -270,6 +275,7 @@ union offset_union { - " .popsection\n" \ - : "=r" (err), "=&r" (v), "=&r" (a) \ - : "0" (err), "1" (v), "2" (a)); \ -+ pax_close_userland(); \ - if (err) \ - goto fault; \ - } while (0) -@@ -283,6 +289,7 @@ union offset_union { - #define __put32_unaligned_check(ins,val,addr) \ - do { \ - unsigned int err = 0, v = val, a = addr; \ -+ pax_open_userland(); \ - __asm__( FIRST_BYTE_32 \ - ARM( "1: "ins" %1, [%2], #1\n" ) \ - THUMB( "1: "ins" %1, [%2]\n" ) \ -@@ -312,6 +319,7 @@ union offset_union { - " .popsection\n" \ - : "=r" (err), "=&r" (v), "=&r" (a) \ - : "0" (err), "1" (v), "2" (a)); \ -+ pax_close_userland(); \ - if (err) \ - goto fault; \ - } while (0) -diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c -index 7abde2c..9df495f 100644 ---- a/arch/arm/mm/cache-l2x0.c -+++ b/arch/arm/mm/cache-l2x0.c -@@ -46,7 +46,7 @@ struct l2x0_of_data { - void (*setup)(const struct device_node *, u32 *, u32 *); - void (*save)(void); - struct outer_cache_fns outer_cache; --}; -+} __do_const; - - static bool of_init = false; - -diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c -index 4370933..e77848e 100644 ---- a/arch/arm/mm/context.c -+++ b/arch/arm/mm/context.c -@@ -43,7 +43,7 @@ - #define NUM_USER_ASIDS ASID_FIRST_VERSION - - static DEFINE_RAW_SPINLOCK(cpu_asid_lock); --static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); -+static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); - static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); - - static DEFINE_PER_CPU(atomic64_t, active_asids); -@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) - { - static u32 cur_idx = 1; - u64 asid = atomic64_read(&mm->context.id); -- u64 generation = atomic64_read(&asid_generation); -+ u64 generation = atomic64_read_unchecked(&asid_generation); - - if (asid != 0 && is_reserved_asid(asid)) { - /* -@@ -199,7 +199,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) - */ - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); - if (asid == NUM_USER_ASIDS) { -- generation = atomic64_add_return(ASID_FIRST_VERSION, -+ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION, - &asid_generation); - flush_context(cpu); - asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); -@@ -230,14 +230,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) - cpu_set_reserved_ttbr0(); - - asid = atomic64_read(&mm->context.id); -- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) -+ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) - && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) - goto switch_mm_fastpath; - - raw_spin_lock_irqsave(&cpu_asid_lock, flags); - /* Check that our ASID belongs to the current generation. 
*/ - asid = atomic64_read(&mm->context.id); -- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) { -+ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) { - asid = new_context(mm, cpu); - atomic64_set(&mm->context.id, asid); - } -diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c -index eb8830a..e39c4bd 100644 ---- a/arch/arm/mm/fault.c -+++ b/arch/arm/mm/fault.c -@@ -25,6 +25,7 @@ - #include <asm/system_misc.h> - #include <asm/system_info.h> - #include <asm/tlbflush.h> -+#include <asm/sections.h> - - #include "fault.h" - -@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, - if (fixup_exception(regs)) - return; - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (addr < TASK_SIZE) { -+ if (current->signal->curr_ip) -+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); -+ else -+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); -+ } -+#endif -+ -+#ifdef CONFIG_PAX_KERNEXEC -+ if ((fsr & FSR_WRITE) && -+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) || -+ (MODULES_VADDR <= addr && addr < MODULES_END))) -+ { -+ if (current->signal->curr_ip) -+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); -+ else -+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid())); -+ } -+#endif -+ - /* - * No handler, we'll have to terminate things with extreme prejudice. - */ -@@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, - } - #endif - -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (fsr & FSR_LNX_PF) { -+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - tsk->thread.address = addr; - tsk->thread.error_code = fsr; - tsk->thread.trap_no = 14; -@@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - } - #endif /* CONFIG_MMU */ - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 20; i++) { -+ unsigned char c; -+ if (get_user(c, (__force unsigned char __user *)pc+i)) -+ printk(KERN_CONT "?? "); -+ else -+ printk(KERN_CONT "%02x ", c); -+ } -+ printk("\n"); -+ -+ printk(KERN_ERR "PAX: bytes at SP-4: "); -+ for (i = -1; i < 20; i++) { -+ unsigned long c; -+ if (get_user(c, (__force unsigned long __user *)sp+i)) -+ printk(KERN_CONT "???????? 
"); -+ else -+ printk(KERN_CONT "%08lx ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * First Level Translation Fault Handler - * -@@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - const struct fsr_info *inf = fsr_info + fsr_fs(fsr); - struct siginfo info; - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ if (addr < TASK_SIZE && is_domain_fault(fsr)) { -+ if (current->signal->curr_ip) -+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); -+ else -+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr); -+ goto die; -+ } -+#endif -+ - if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) - return; - -+die: - printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", - inf->name, fsr, addr); - -@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs * - ifsr_info[nr].name = name; - } - -+asmlinkage int sys_sigreturn(struct pt_regs *regs); -+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs); -+ - asmlinkage void __exception - do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) - { - const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr); - struct siginfo info; -+ unsigned long pc = instruction_pointer(regs); -+ -+ if (user_mode(regs)) { -+ unsigned long sigpage = current->mm->context.sigpage; -+ -+ if (sigpage <= pc && pc < sigpage + 7*4) { -+ if (pc < sigpage + 3*4) -+ sys_sigreturn(regs); -+ else -+ sys_rt_sigreturn(regs); -+ return; -+ } -+ if (pc == 0xffff0f60UL) { -+ /* -+ * PaX: __kuser_cmpxchg64 emulation -+ */ -+ // TODO -+ //regs->ARM_pc = regs->ARM_lr; -+ //return; -+ } -+ if (pc == 0xffff0fa0UL) { -+ /* -+ * PaX: __kuser_memory_barrier emulation -+ */ -+ // dmb(); implied by the exception -+ regs->ARM_pc = regs->ARM_lr; -+ return; -+ } -+ if (pc == 0xffff0fc0UL) { -+ /* -+ * PaX: __kuser_cmpxchg emulation -+ */ -+ // TODO -+ //long new; -+ //int op; -+ -+ //op = FUTEX_OP_SET << 28; -+ //new = futex_atomic_op_inuser(op, regs->ARM_r2); -+ //regs->ARM_r0 = old != new; -+ //regs->ARM_pc = regs->ARM_lr; -+ //return; -+ } -+ if (pc == 0xffff0fe0UL) { -+ /* -+ * PaX: __kuser_get_tls emulation -+ */ -+ regs->ARM_r0 = current_thread_info()->tp_value[0]; -+ regs->ARM_pc = regs->ARM_lr; -+ return; -+ } -+ } -+ -+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) { -+ if (current->signal->curr_ip) -+ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", ¤t->signal->curr_ip, current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), -+ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc); -+ else -+ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current), -+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), -+ pc >= TASK_SIZE ? 
"non-executable kernel" : "userland", pc); -+ goto die; -+ } -+#endif -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) { -+#ifdef CONFIG_THUMB2_KERNEL -+ unsigned short bkpt; -+ -+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) { -+#else -+ unsigned int bkpt; -+ -+ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) { -+#endif -+ current->thread.error_code = ifsr; -+ current->thread.trap_no = 0; -+ pax_report_refcount_overflow(regs); -+ fixup_exception(regs); -+ return; -+ } -+ } -+#endif - - if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) - return; - -+die: - printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", - inf->name, ifsr, addr); - -diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h -index cf08bdf..772656c 100644 ---- a/arch/arm/mm/fault.h -+++ b/arch/arm/mm/fault.h -@@ -3,6 +3,7 @@ - - /* - * Fault status register encodings. We steal bit 31 for our own purposes. -+ * Set when the FSR value is from an instruction fault. - */ - #define FSR_LNX_PF (1 << 31) - #define FSR_WRITE (1 << 11) -@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr) - } - #endif - -+/* valid for LPAE and !LPAE */ -+static inline int is_xn_fault(unsigned int fsr) -+{ -+ return ((fsr_fs(fsr) & 0x3c) == 0xc); -+} -+ -+static inline int is_domain_fault(unsigned int fsr) -+{ -+ return ((fsr_fs(fsr) & 0xD) == 0x9); -+} -+ - void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); - unsigned long search_exception_table(unsigned long addr); - -diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c -index 804d615..fcec50a 100644 ---- a/arch/arm/mm/init.c -+++ b/arch/arm/mm/init.c -@@ -30,6 +30,8 @@ - #include <asm/setup.h> - #include <asm/tlb.h> - #include <asm/fixmap.h> -+#include <asm/system_info.h> -+#include <asm/cp15.h> - - #include <asm/mach/arch.h> - #include <asm/mach/map.h> -@@ -625,7 +627,46 @@ void free_initmem(void) - { - #ifdef CONFIG_HAVE_TCM - extern char __tcm_start, __tcm_end; -+#endif - -+#ifdef CONFIG_PAX_KERNEXEC -+ unsigned long addr; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ int cpu_arch = cpu_architecture(); -+ unsigned int cr = get_cr(); -+ -+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { -+ /* make pages tables, etc before .text NX */ -+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ __section_update(pmd, addr, PMD_SECT_XN); -+ } -+ /* make init NX */ -+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ __section_update(pmd, addr, PMD_SECT_XN); -+ } -+ /* make kernel code/rodata RX */ -+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) { -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+#ifdef CONFIG_ARM_LPAE -+ __section_update(pmd, addr, PMD_SECT_RDONLY); -+#else -+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE); -+#endif -+ } -+ } -+#endif -+ -+#ifdef CONFIG_HAVE_TCM - poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); - free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); - #endif -diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c -index f9c32ba..8540068 100644 ---- a/arch/arm/mm/ioremap.c -+++ b/arch/arm/mm/ioremap.c -@@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, 
size_t size, bool cached) - unsigned int mtype; - - if (cached) -- mtype = MT_MEMORY_RWX; -+ mtype = MT_MEMORY_RX; - else -- mtype = MT_MEMORY_RWX_NONCACHED; -+ mtype = MT_MEMORY_RX_NONCACHED; - - return __arm_ioremap_caller(phys_addr, size, mtype, - __builtin_return_address(0)); -diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c -index 5e85ed3..b10a7ed 100644 ---- a/arch/arm/mm/mmap.c -+++ b/arch/arm/mm/mmap.c -@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - struct vm_area_struct *vma; - int do_align = 0; - int aliasing = cache_is_vipt_aliasing(); -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); - struct vm_unmapped_area_info info; - - /* -@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - if (len > TASK_SIZE) - return -ENOMEM; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_align) - addr = COLOUR_ALIGN(addr, pgoff); -@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) - return addr; - } - -@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, - info.high_limit = TASK_SIZE; - info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; - info.align_offset = pgoff << PAGE_SHIFT; -+ info.threadstack_offset = offset; - return vm_unmapped_area(&info); - } - -@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - unsigned long addr = addr0; - int do_align = 0; - int aliasing = cache_is_vipt_aliasing(); -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); - struct vm_unmapped_area_info info; - - /* -@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - return addr; - } - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - /* requesting a specific address */ - if (addr) { - if (do_align) -@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - else - addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) - return addr; - } - -@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - info.high_limit = mm->mmap_base; - info.align_mask = do_align ? 
(PAGE_MASK & (SHMLBA - 1)) : 0; - info.align_offset = pgoff << PAGE_SHIFT; -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - - /* -@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - { - unsigned long random_factor = 0UL; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - /* 8 bits of randomness in 20 address space bits */ - if ((current->flags & PF_RANDOMIZE) && - !(current->personality & ADDR_NO_RANDOMIZE)) -@@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - - if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(random_factor); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } - } -diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c -index f15c22e..d830561 100644 ---- a/arch/arm/mm/mmu.c -+++ b/arch/arm/mm/mmu.c -@@ -39,6 +39,22 @@ - #include "mm.h" - #include "tcm.h" - -+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) -+void modify_domain(unsigned int dom, unsigned int type) -+{ -+ struct thread_info *thread = current_thread_info(); -+ unsigned int domain = thread->cpu_domain; -+ /* -+ * DOMAIN_MANAGER might be defined to some other value, -+ * use the arch-defined constant -+ */ -+ domain &= ~domain_val(dom, 3); -+ thread->cpu_domain = domain | domain_val(dom, type); -+ set_domain(thread->cpu_domain); -+} -+EXPORT_SYMBOL(modify_domain); -+#endif -+ - /* - * empty_zero_page is a special page that is used for - * zero-initialized data and COW. 
-@@ -235,7 +251,15 @@ __setup("noalign", noalign_setup); - #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE - #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE - --static struct mem_type mem_types[] = { -+#ifdef CONFIG_PAX_KERNEXEC -+#define L_PTE_KERNEXEC L_PTE_RDONLY -+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY -+#else -+#define L_PTE_KERNEXEC L_PTE_DIRTY -+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE -+#endif -+ -+static struct mem_type mem_types[] __read_only = { - [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */ - .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | - L_PTE_SHARED, -@@ -264,19 +288,19 @@ static struct mem_type mem_types[] = { - .prot_sect = PROT_SECT_DEVICE, - .domain = DOMAIN_IO, - }, -- [MT_UNCACHED] = { -+ [MT_UNCACHED_RW] = { - .prot_pte = PROT_PTE_DEVICE, - .prot_l1 = PMD_TYPE_TABLE, - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, - .domain = DOMAIN_IO, - }, -- [MT_CACHECLEAN] = { -- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, -+ [MT_CACHECLEAN_RO] = { -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY, - .domain = DOMAIN_KERNEL, - }, - #ifndef CONFIG_ARM_LPAE -- [MT_MINICLEAN] = { -- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, -+ [MT_MINICLEAN_RO] = { -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY, - .domain = DOMAIN_KERNEL, - }, - #endif -@@ -284,15 +308,15 @@ static struct mem_type mem_types[] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_RDONLY, - .prot_l1 = PMD_TYPE_TABLE, -- .domain = DOMAIN_USER, -+ .domain = DOMAIN_VECTORS, - }, - [MT_HIGH_VECTORS] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_USER | L_PTE_RDONLY, - .prot_l1 = PMD_TYPE_TABLE, -- .domain = DOMAIN_USER, -+ .domain = DOMAIN_VECTORS, - }, -- [MT_MEMORY_RWX] = { -+ [__MT_MEMORY_RWX] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, - .prot_l1 = PMD_TYPE_TABLE, - .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, -@@ -305,17 +329,30 @@ static struct mem_type mem_types[] = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, - .domain = DOMAIN_KERNEL, - }, -- [MT_ROM] = { -- .prot_sect = PMD_TYPE_SECT, -+ [MT_MEMORY_RX] = { -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, -+ .prot_l1 = PMD_TYPE_TABLE, -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, -+ .domain = DOMAIN_KERNEL, -+ }, -+ [MT_ROM_RX] = { -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY, - .domain = DOMAIN_KERNEL, - }, -- [MT_MEMORY_RWX_NONCACHED] = { -+ [MT_MEMORY_RW_NONCACHED] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_MT_BUFFERABLE, - .prot_l1 = PMD_TYPE_TABLE, - .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, - .domain = DOMAIN_KERNEL, - }, -+ [MT_MEMORY_RX_NONCACHED] = { -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC | -+ L_PTE_MT_BUFFERABLE, -+ .prot_l1 = PMD_TYPE_TABLE, -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, -+ .domain = DOMAIN_KERNEL, -+ }, - [MT_MEMORY_RW_DTCM] = { - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_XN, -@@ -323,9 +360,10 @@ static struct mem_type mem_types[] = { - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, - .domain = DOMAIN_KERNEL, - }, -- [MT_MEMORY_RWX_ITCM] = { -- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, -+ [MT_MEMORY_RX_ITCM] = { -+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC, - .prot_l1 = PMD_TYPE_TABLE, -+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC, - .domain = DOMAIN_KERNEL, - }, - [MT_MEMORY_RW_SO] = { -@@ -534,9 +572,14 @@ static void __init 
build_mem_type_table(void) - * Mark cache clean areas and XIP ROM read only - * from SVC mode and no access from userspace. - */ -- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+#ifdef CONFIG_PAX_KERNEXEC -+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+#endif -+ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; -+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; - #endif - - if (is_smp()) { -@@ -552,13 +595,17 @@ static void __init build_mem_type_table(void) - mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; - mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; - mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; -- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; -- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; -+ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; -+ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; - mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; -+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S; -+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED; - mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S; -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED; - } - } - -@@ -569,15 +616,20 @@ static void __init build_mem_type_table(void) - if (cpu_arch >= CPU_ARCH_ARMv6) { - if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { - /* Non-cacheable Normal is XCB = 001 */ -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= -+ PMD_SECT_BUFFERED; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= - PMD_SECT_BUFFERED; - } else { - /* For both ARMv6 and non-TEX-remapping ARMv7 */ -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= -+ PMD_SECT_TEX(1); -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= - PMD_SECT_TEX(1); - } - } else { -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; - } - - #ifdef CONFIG_ARM_LPAE -@@ -593,6 +645,8 @@ static void __init build_mem_type_table(void) - vecs_pgprot |= PTE_EXT_AF; - #endif - -+ user_pgprot |= __supported_pte_mask; -+ - for (i = 0; i < 16; i++) { - pteval_t v = pgprot_val(protection_map[i]); - protection_map[i] = __pgprot(v | user_pgprot); -@@ -610,21 +664,24 @@ static void __init build_mem_type_table(void) - - mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; - mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; -- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; -- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; -+ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; -+ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot; - 
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; - mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; -+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd; -+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot; - mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; -- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; -- mem_types[MT_ROM].prot_sect |= cp->pmd; -+ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask; -+ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask; -+ mem_types[MT_ROM_RX].prot_sect |= cp->pmd; - - switch (cp->pmd) { - case PMD_SECT_WT: -- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; -+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT; - break; - case PMD_SECT_WB: - case PMD_SECT_WBWA: -- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; -+ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB; - break; - } - pr_info("Memory policy: %sData cache %s\n", -@@ -842,7 +899,7 @@ static void __init create_mapping(struct map_desc *md) - return; - } - -- if ((md->type == MT_DEVICE || md->type == MT_ROM) && -+ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) && - md->virtual >= PAGE_OFFSET && - (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { - printk(KERN_WARNING "BUG: mapping for 0x%08llx" -@@ -1257,18 +1314,15 @@ void __init arm_mm_memblock_reserve(void) - * called function. This means you can't use any function or debugging - * method which may touch any device, otherwise the kernel _will_ crash. - */ -+ -+static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE); -+ - static void __init devicemaps_init(const struct machine_desc *mdesc) - { - struct map_desc map; - unsigned long addr; -- void *vectors; - -- /* -- * Allocate the vector page early. -- */ -- vectors = early_alloc(PAGE_SIZE * 2); -- -- early_trap_init(vectors); -+ early_trap_init(&vectors); - - for (addr = VMALLOC_START; addr; addr += PMD_SIZE) - pmd_clear(pmd_off_k(addr)); -@@ -1281,7 +1335,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) - map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); - map.virtual = MODULES_VADDR; - map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; -- map.type = MT_ROM; -+ map.type = MT_ROM_RX; - create_mapping(&map); - #endif - -@@ -1292,14 +1346,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) - map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); - map.virtual = FLUSH_BASE; - map.length = SZ_1M; -- map.type = MT_CACHECLEAN; -+ map.type = MT_CACHECLEAN_RO; - create_mapping(&map); - #endif - #ifdef FLUSH_BASE_MINICACHE - map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); - map.virtual = FLUSH_BASE_MINICACHE; - map.length = SZ_1M; -- map.type = MT_MINICLEAN; -+ map.type = MT_MINICLEAN_RO; - create_mapping(&map); - #endif - -@@ -1308,7 +1362,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) - * location (0xffff0000). If we aren't using high-vectors, also - * create a mapping at the low-vectors virtual address. 
- */ -- map.pfn = __phys_to_pfn(virt_to_phys(vectors)); -+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors)); - map.virtual = 0xffff0000; - map.length = PAGE_SIZE; - #ifdef CONFIG_KUSER_HELPERS -@@ -1365,8 +1419,10 @@ static void __init kmap_init(void) - static void __init map_lowmem(void) - { - struct memblock_region *reg; -+#ifndef CONFIG_PAX_KERNEXEC - unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); - unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); -+#endif - - /* Map all the lowmem memory banks. */ - for_each_memblock(memory, reg) { -@@ -1379,11 +1435,48 @@ static void __init map_lowmem(void) - if (start >= end) - break; - -+#ifdef CONFIG_PAX_KERNEXEC -+ map.pfn = __phys_to_pfn(start); -+ map.virtual = __phys_to_virt(start); -+ map.length = end - start; -+ -+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) { -+ struct map_desc kernel; -+ struct map_desc initmap; -+ -+ /* when freeing initmem we will make this RW */ -+ initmap.pfn = __phys_to_pfn(__pa(__init_begin)); -+ initmap.virtual = (unsigned long)__init_begin; -+ initmap.length = _sdata - __init_begin; -+ initmap.type = __MT_MEMORY_RWX; -+ create_mapping(&initmap); -+ -+ /* when freeing initmem we will make this RX */ -+ kernel.pfn = __phys_to_pfn(__pa(_stext)); -+ kernel.virtual = (unsigned long)_stext; -+ kernel.length = __init_begin - _stext; -+ kernel.type = __MT_MEMORY_RWX; -+ create_mapping(&kernel); -+ -+ if (map.virtual < (unsigned long)_stext) { -+ map.length = (unsigned long)_stext - map.virtual; -+ map.type = __MT_MEMORY_RWX; -+ create_mapping(&map); -+ } -+ -+ map.pfn = __phys_to_pfn(__pa(_sdata)); -+ map.virtual = (unsigned long)_sdata; -+ map.length = end - __pa(_sdata); -+ } -+ -+ map.type = MT_MEMORY_RW; -+ create_mapping(&map); -+#else - if (end < kernel_x_start || start >= kernel_x_end) { - map.pfn = __phys_to_pfn(start); - map.virtual = __phys_to_virt(start); - map.length = end - start; -- map.type = MT_MEMORY_RWX; -+ map.type = __MT_MEMORY_RWX; - - create_mapping(&map); - } else { -@@ -1400,7 +1493,7 @@ static void __init map_lowmem(void) - map.pfn = __phys_to_pfn(kernel_x_start); - map.virtual = __phys_to_virt(kernel_x_start); - map.length = kernel_x_end - kernel_x_start; -- map.type = MT_MEMORY_RWX; -+ map.type = __MT_MEMORY_RWX; - - create_mapping(&map); - -@@ -1413,6 +1506,7 @@ static void __init map_lowmem(void) - create_mapping(&map); - } - } -+#endif - } - } - -diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c -index 6adf591..00ad1e9 100644 ---- a/arch/arm/net/bpf_jit_32.c -+++ b/arch/arm/net/bpf_jit_32.c -@@ -73,32 +73,52 @@ struct jit_ctx { - - int bpf_jit_enable __read_mostly; - --static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) -+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, -+ unsigned int size) -+{ -+ void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); -+ -+ if (!ptr) -+ return -EFAULT; -+ memcpy(ret, ptr, size); -+ return 0; -+} -+ -+static u64 jit_get_skb_b(struct sk_buff *skb, int offset) - { - u8 ret; - int err; - -- err = skb_copy_bits(skb, offset, &ret, 1); -+ if (offset < 0) -+ err = call_neg_helper(skb, offset, &ret, 1); -+ else -+ err = skb_copy_bits(skb, offset, &ret, 1); - - return (u64)err << 32 | ret; - } - --static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) -+static u64 jit_get_skb_h(struct sk_buff *skb, int offset) - { - u16 ret; - int err; - -- err = skb_copy_bits(skb, offset, &ret, 2); -+ if (offset 
< 0) -+ err = call_neg_helper(skb, offset, &ret, 2); -+ else -+ err = skb_copy_bits(skb, offset, &ret, 2); - - return (u64)err << 32 | ntohs(ret); - } - --static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) -+static u64 jit_get_skb_w(struct sk_buff *skb, int offset) - { - u32 ret; - int err; - -- err = skb_copy_bits(skb, offset, &ret, 4); -+ if (offset < 0) -+ err = call_neg_helper(skb, offset, &ret, 4); -+ else -+ err = skb_copy_bits(skb, offset, &ret, 4); - - return (u64)err << 32 | ntohl(ret); - } -@@ -523,9 +543,6 @@ static int build_body(struct jit_ctx *ctx) - case BPF_S_LD_B_ABS: - load_order = 0; - load: -- /* the interpreter will deal with the negative K */ -- if ((int)k < 0) -- return -ENOTSUPP; - emit_mov_i(r_off, k, ctx); - load_common: - ctx->seen |= SEEN_DATA | SEEN_CALL; -@@ -534,12 +551,24 @@ load_common: - emit(ARM_SUB_I(r_scratch, r_skb_hl, - 1 << load_order), ctx); - emit(ARM_CMP_R(r_scratch, r_off), ctx); -- condt = ARM_COND_HS; -+ condt = ARM_COND_GE; - } else { - emit(ARM_CMP_R(r_skb_hl, r_off), ctx); - condt = ARM_COND_HI; - } - -+ /* -+ * test for negative offset, only if we are -+ * currently scheduled to take the fast -+ * path. this will update the flags so that -+ * the slowpath instruction are ignored if the -+ * offset is negative. -+ * -+ * for loard_order == 0 the HI condition will -+ * make loads at offset 0 take the slow path too. -+ */ -+ _emit(condt, ARM_CMP_I(r_off, 0), ctx); -+ - _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), - ctx); - -diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c -index 5b217f4..c23f40e 100644 ---- a/arch/arm/plat-iop/setup.c -+++ b/arch/arm/plat-iop/setup.c -@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = { - .virtual = IOP3XX_PERIPHERAL_VIRT_BASE, - .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE), - .length = IOP3XX_PERIPHERAL_SIZE, -- .type = MT_UNCACHED, -+ .type = MT_UNCACHED_RW, - }, - }; - -diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c -index a5bc92d..0bb4730 100644 ---- a/arch/arm/plat-omap/sram.c -+++ b/arch/arm/plat-omap/sram.c -@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size, - * Looks like we need to preserve some bootloader code at the - * beginning of SRAM for jumping to flash for reboot to work... 
- */ -+ pax_open_kernel(); - memset_io(omap_sram_base + omap_sram_skip, 0, - omap_sram_size - omap_sram_skip); -+ pax_close_kernel(); - } -diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h -index ce6d763..cfea917 100644 ---- a/arch/arm/plat-samsung/include/plat/dma-ops.h -+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h -@@ -47,7 +47,7 @@ struct samsung_dma_ops { - int (*started)(unsigned ch); - int (*flush)(unsigned ch); - int (*stop)(unsigned ch); --}; -+} __no_const; - - extern void *samsung_dmadev_get_ops(void); - extern void *s3c_dma_get_ops(void); -diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h -index 409ca37..10c87ad 100644 ---- a/arch/arm64/include/asm/barrier.h -+++ b/arch/arm64/include/asm/barrier.h -@@ -40,7 +40,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h -index 6c0f684..5faea9d 100644 ---- a/arch/arm64/include/asm/uaccess.h -+++ b/arch/arm64/include/asm/uaccess.h -@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs) - flag; \ - }) - -+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) - #define access_ok(type, addr, size) __range_ok(addr, size) - #define user_addr_max get_fs - -diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h -index c3a58a1..78fbf54 100644 ---- a/arch/avr32/include/asm/cache.h -+++ b/arch/avr32/include/asm/cache.h -@@ -1,8 +1,10 @@ - #ifndef __ASM_AVR32_CACHE_H - #define __ASM_AVR32_CACHE_H - -+#include <linux/const.h> -+ - #define L1_CACHE_SHIFT 5 --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - /* - * Memory returned by kmalloc() may be used for DMA, so we must make -diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h -index d232888..87c8df1 100644 ---- a/arch/avr32/include/asm/elf.h -+++ b/arch/avr32/include/asm/elf.h -@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t; - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) -+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x00001000UL -+ -+#define PAX_DELTA_MMAP_LEN 15 -+#define PAX_DELTA_STACK_LEN 15 -+#endif - - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. 
This could be done in user space, -diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h -index 479330b..53717a8 100644 ---- a/arch/avr32/include/asm/kmap_types.h -+++ b/arch/avr32/include/asm/kmap_types.h -@@ -2,9 +2,9 @@ - #define __ASM_AVR32_KMAP_TYPES_H - - #ifdef CONFIG_DEBUG_HIGHMEM --# define KM_TYPE_NR 29 -+# define KM_TYPE_NR 30 - #else --# define KM_TYPE_NR 14 -+# define KM_TYPE_NR 15 - #endif - - #endif /* __ASM_AVR32_KMAP_TYPES_H */ -diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c -index d223a8b..69c5210 100644 ---- a/arch/avr32/mm/fault.c -+++ b/arch/avr32/mm/fault.c -@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) - - int exception_trace = 1; - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 20; i++) { -+ unsigned char c; -+ if (get_user(c, (unsigned char *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%02x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * This routine handles page faults. It determines the address and the - * problem, and then passes it off to one of the appropriate routines. -@@ -178,6 +195,16 @@ bad_area: - up_read(&mm->mmap_sem); - - if (user_mode(regs)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (mm->pax_flags & MF_PAX_PAGEEXEC) { -+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { -+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); -+ do_group_exit(SIGKILL); -+ } -+ } -+#endif -+ - if (exception_trace && printk_ratelimit()) - printk("%s%s[%d]: segfault at %08lx pc %08lx " - "sp %08lx ecr %lu\n", -diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h -index 568885a..f8008df 100644 ---- a/arch/blackfin/include/asm/cache.h -+++ b/arch/blackfin/include/asm/cache.h -@@ -7,6 +7,7 @@ - #ifndef __ARCH_BLACKFIN_CACHE_H - #define __ARCH_BLACKFIN_CACHE_H - -+#include <linux/const.h> - #include <linux/linkage.h> /* for asmlinkage */ - - /* -@@ -14,7 +15,7 @@ - * Blackfin loads 32 bytes for cache - */ - #define L1_CACHE_SHIFT 5 --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - #define SMP_CACHE_BYTES L1_CACHE_BYTES - - #define ARCH_DMA_MINALIGN L1_CACHE_BYTES -diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h -index aea2718..3639a60 100644 ---- a/arch/cris/include/arch-v10/arch/cache.h -+++ b/arch/cris/include/arch-v10/arch/cache.h -@@ -1,8 +1,9 @@ - #ifndef _ASM_ARCH_CACHE_H - #define _ASM_ARCH_CACHE_H - -+#include <linux/const.h> - /* Etrax 100LX have 32-byte cache-lines. */ --#define L1_CACHE_BYTES 32 - #define L1_CACHE_SHIFT 5 -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #endif /* _ASM_ARCH_CACHE_H */ -diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h -index 7caf25d..ee65ac5 100644 ---- a/arch/cris/include/arch-v32/arch/cache.h -+++ b/arch/cris/include/arch-v32/arch/cache.h -@@ -1,11 +1,12 @@ - #ifndef _ASM_CRIS_ARCH_CACHE_H - #define _ASM_CRIS_ARCH_CACHE_H - -+#include <linux/const.h> - #include <arch/hwregs/dma.h> - - /* A cache-line is 32 bytes. 
*/ --#define L1_CACHE_BYTES 32 - #define L1_CACHE_SHIFT 5 -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) - -diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h -index b86329d..6709906 100644 ---- a/arch/frv/include/asm/atomic.h -+++ b/arch/frv/include/asm/atomic.h -@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v) - #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter)) - #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter)) - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - { - int c, old; -diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h -index 2797163..c2a401df9 100644 ---- a/arch/frv/include/asm/cache.h -+++ b/arch/frv/include/asm/cache.h -@@ -12,10 +12,11 @@ - #ifndef __ASM_CACHE_H - #define __ASM_CACHE_H - -+#include <linux/const.h> - - /* bytes per L1 cache line */ - #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) - #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) -diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h -index 43901f2..0d8b865 100644 ---- a/arch/frv/include/asm/kmap_types.h -+++ b/arch/frv/include/asm/kmap_types.h -@@ -2,6 +2,6 @@ - #ifndef _ASM_KMAP_TYPES_H - #define _ASM_KMAP_TYPES_H - --#define KM_TYPE_NR 17 -+#define KM_TYPE_NR 18 - - #endif -diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c -index 836f147..4cf23f5 100644 ---- a/arch/frv/mm/elf-fdpic.c -+++ b/arch/frv/mm/elf-fdpic.c -@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - { - struct vm_area_struct *vma; - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); - - if (len > TASK_SIZE) - return -ENOMEM; -@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - if (addr) { - addr = PAGE_ALIGN(addr); - vma = find_vma(current->mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) - goto success; - } - -@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi - info.high_limit = (current->mm->start_stack - 0x00200000); - info.align_mask = 0; - info.align_offset = 0; -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - if (!(addr & ~PAGE_MASK)) - goto success; -diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h -index f4ca594..adc72fd6 100644 ---- a/arch/hexagon/include/asm/cache.h -+++ b/arch/hexagon/include/asm/cache.h -@@ -21,9 +21,11 @@ - #ifndef 
__ASM_CACHE_H - #define __ASM_CACHE_H - -+#include <linux/const.h> -+ - /* Bytes per L1 cache line */ --#define L1_CACHE_SHIFT (5) --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_SHIFT 5 -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define __cacheline_aligned __aligned(L1_CACHE_BYTES) - #define ____cacheline_aligned __aligned(L1_CACHE_BYTES) -diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig -index 0c8e553..112d734 100644 ---- a/arch/ia64/Kconfig -+++ b/arch/ia64/Kconfig -@@ -544,6 +544,7 @@ source "drivers/sn/Kconfig" - config KEXEC - bool "kexec system call" - depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) -+ depends on !GRKERNSEC_KMEM - help - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot -diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile -index f37238f..810b95f 100644 ---- a/arch/ia64/Makefile -+++ b/arch/ia64/Makefile -@@ -99,5 +99,6 @@ endef - archprepare: make_nr_irqs_h FORCE - PHONY += make_nr_irqs_h FORCE - -+make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) - make_nr_irqs_h: FORCE - $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h -diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h -index 6e6fe18..a6ae668 100644 ---- a/arch/ia64/include/asm/atomic.h -+++ b/arch/ia64/include/asm/atomic.h -@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v) - #define atomic64_inc(v) atomic64_add(1, (v)) - #define atomic64_dec(v) atomic64_sub(1, (v)) - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - /* Atomic operations are already serializing */ - #define smp_mb__before_atomic_dec() barrier() - #define smp_mb__after_atomic_dec() barrier() -diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h -index d0a69aa..142f878 100644 ---- a/arch/ia64/include/asm/barrier.h -+++ b/arch/ia64/include/asm/barrier.h -@@ -64,7 +64,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - barrier(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h -index 988254a..e1ee885 100644 ---- a/arch/ia64/include/asm/cache.h -+++ b/arch/ia64/include/asm/cache.h -@@ -1,6 +1,7 @@ - #ifndef _ASM_IA64_CACHE_H - #define _ASM_IA64_CACHE_H - -+#include <linux/const.h> - - /* - * Copyright (C) 1998-2000 Hewlett-Packard Co -@@ -9,7 +10,7 @@ - - /* Bytes per L1 (data) cache line. 
*/ - #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #ifdef CONFIG_SMP - # define SMP_CACHE_SHIFT L1_CACHE_SHIFT -diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h -index 5a83c5c..4d7f553 100644 ---- a/arch/ia64/include/asm/elf.h -+++ b/arch/ia64/include/asm/elf.h -@@ -42,6 +42,13 @@ - */ - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) -+ -+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) -+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) -+#endif -+ - #define PT_IA_64_UNWIND 0x70000001 - - /* IA-64 relocations: */ -diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h -index 5767cdf..7462574 100644 ---- a/arch/ia64/include/asm/pgalloc.h -+++ b/arch/ia64/include/asm/pgalloc.h -@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) - pgd_val(*pgd_entry) = __pa(pud); - } - -+static inline void -+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud) -+{ -+ pgd_populate(mm, pgd_entry, pud); -+} -+ - static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) - { - return quicklist_alloc(0, GFP_KERNEL, NULL); -@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) - pud_val(*pud_entry) = __pa(pmd); - } - -+static inline void -+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) -+{ -+ pud_populate(mm, pud_entry, pmd); -+} -+ - static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) - { - return quicklist_alloc(0, GFP_KERNEL, NULL); -diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h -index 7935115..c0eca6a 100644 ---- a/arch/ia64/include/asm/pgtable.h -+++ b/arch/ia64/include/asm/pgtable.h -@@ -12,7 +12,7 @@ - * David Mosberger-Tang <davidm@hpl.hp.com> - */ - -- -+#include <linux/const.h> - #include <asm/mman.h> - #include <asm/page.h> - #include <asm/processor.h> -@@ -142,6 +142,17 @@ - #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) - #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) - #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) -+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) -+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+# define PAGE_COPY_NOEXEC PAGE_COPY -+#endif -+ - #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) - #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) - #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) -diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h -index 45698cd..e8e2dbc 100644 ---- a/arch/ia64/include/asm/spinlock.h -+++ b/arch/ia64/include/asm/spinlock.h -@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) - unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; - - asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p)); -- ACCESS_ONCE(*p) = 
(tmp + 2) & ~1; -+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1; - } - - static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) -diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h -index 449c8c0..3d4b1e9 100644 ---- a/arch/ia64/include/asm/uaccess.h -+++ b/arch/ia64/include/asm/uaccess.h -@@ -70,6 +70,7 @@ - && ((segment).seg == KERNEL_DS.seg \ - || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ - }) -+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) - #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) - - /* -@@ -240,12 +241,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use - static inline unsigned long - __copy_to_user (void __user *to, const void *from, unsigned long count) - { -+ if (count > INT_MAX) -+ return count; -+ -+ if (!__builtin_constant_p(count)) -+ check_object_size(from, count, true); -+ - return __copy_user(to, (__force void __user *) from, count); - } - - static inline unsigned long - __copy_from_user (void *to, const void __user *from, unsigned long count) - { -+ if (count > INT_MAX) -+ return count; -+ -+ if (!__builtin_constant_p(count)) -+ check_object_size(to, count, false); -+ - return __copy_user((__force void __user *) to, from, count); - } - -@@ -255,10 +268,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) - ({ \ - void __user *__cu_to = (to); \ - const void *__cu_from = (from); \ -- long __cu_len = (n); \ -+ unsigned long __cu_len = (n); \ - \ -- if (__access_ok(__cu_to, __cu_len, get_fs())) \ -+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \ -+ if (!__builtin_constant_p(n)) \ -+ check_object_size(__cu_from, __cu_len, true); \ - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ -+ } \ - __cu_len; \ - }) - -@@ -266,11 +282,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) - ({ \ - void *__cu_to = (to); \ - const void __user *__cu_from = (from); \ -- long __cu_len = (n); \ -+ unsigned long __cu_len = (n); \ - \ - __chk_user_ptr(__cu_from); \ -- if (__access_ok(__cu_from, __cu_len, get_fs())) \ -+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \ -+ if (!__builtin_constant_p(n)) \ -+ check_object_size(__cu_to, __cu_len, false); \ - __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ -+ } \ - __cu_len; \ - }) - -diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c -index 24603be..948052d 100644 ---- a/arch/ia64/kernel/module.c -+++ b/arch/ia64/kernel/module.c -@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt) - void - module_free (struct module *mod, void *module_region) - { -- if (mod && mod->arch.init_unw_table && -- module_region == mod->module_init) { -+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { - unw_remove_unwind_table(mod->arch.init_unw_table); - mod->arch.init_unw_table = NULL; - } -@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, - } - - static inline int -+in_init_rx (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; -+} -+ -+static inline int -+in_init_rw (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; -+} -+ -+static inline int - in_init (const struct module *mod, uint64_t addr) - { -- return addr - 
(uint64_t) mod->module_init < mod->init_size; -+ return in_init_rx(mod, addr) || in_init_rw(mod, addr); -+} -+ -+static inline int -+in_core_rx (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; -+} -+ -+static inline int -+in_core_rw (const struct module *mod, uint64_t addr) -+{ -+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; - } - - static inline int - in_core (const struct module *mod, uint64_t addr) - { -- return addr - (uint64_t) mod->module_core < mod->core_size; -+ return in_core_rx(mod, addr) || in_core_rw(mod, addr); - } - - static inline int -@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, - break; - - case RV_BDREL: -- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); -+ if (in_init_rx(mod, val)) -+ val -= (uint64_t) mod->module_init_rx; -+ else if (in_init_rw(mod, val)) -+ val -= (uint64_t) mod->module_init_rw; -+ else if (in_core_rx(mod, val)) -+ val -= (uint64_t) mod->module_core_rx; -+ else if (in_core_rw(mod, val)) -+ val -= (uint64_t) mod->module_core_rw; - break; - - case RV_LTV: -@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind - * addresses have been selected... - */ - uint64_t gp; -- if (mod->core_size > MAX_LTOFF) -+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) - /* - * This takes advantage of fact that SHF_ARCH_SMALL gets allocated - * at the end of the module. - */ -- gp = mod->core_size - MAX_LTOFF / 2; -+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; - else -- gp = mod->core_size / 2; -- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); -+ gp = (mod->core_size_rx + mod->core_size_rw) / 2; -+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); - mod->arch.gp = gp; - DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); - } -diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c -index ab33328..f39506c 100644 ---- a/arch/ia64/kernel/palinfo.c -+++ b/arch/ia64/kernel/palinfo.c -@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb, - return NOTIFY_OK; - } - --static struct notifier_block __refdata palinfo_cpu_notifier = -+static struct notifier_block palinfo_cpu_notifier = - { - .notifier_call = palinfo_cpu_callback, - .priority = 0, -diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c -index 41e33f8..65180b2a 100644 ---- a/arch/ia64/kernel/sys_ia64.c -+++ b/arch/ia64/kernel/sys_ia64.c -@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len - unsigned long align_mask = 0; - struct mm_struct *mm = current->mm; - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); - - if (len > RGN_MAP_LIMIT) - return -ENOMEM; -@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len - if (REGION_NUMBER(addr) == RGN_HPAGE) - addr = 0; - #endif -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ addr = mm->free_area_cache; -+ else -+#endif -+ - if (!addr) - addr = TASK_UNMAPPED_BASE; - -@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len - info.high_limit = TASK_SIZE; - info.align_mask = align_mask; - info.align_offset = 0; -+ info.threadstack_offset = offset; - return vm_unmapped_area(&info); - } - -diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S -index 
84f8a52..7c76178 100644 ---- a/arch/ia64/kernel/vmlinux.lds.S -+++ b/arch/ia64/kernel/vmlinux.lds.S -@@ -192,7 +192,7 @@ SECTIONS { - /* Per-cpu data: */ - . = ALIGN(PERCPU_PAGE_SIZE); - PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu) -- __phys_per_cpu_start = __per_cpu_load; -+ __phys_per_cpu_start = per_cpu_load; - /* - * ensure percpu data fits - * into percpu page size -diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c -index ba5ba7a..36e9d3a 100644 ---- a/arch/ia64/mm/fault.c -+++ b/arch/ia64/mm/fault.c -@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address) - return pte_present(pte); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - # define VM_READ_BIT 0 - # define VM_WRITE_BIT 1 - # define VM_EXEC_BIT 2 -@@ -151,8 +168,21 @@ retry: - if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE)))) - goto bad_area; - -- if ((vma->vm_flags & mask) != mask) -+ if ((vma->vm_flags & mask) != mask) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { -+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) -+ goto bad_area; -+ -+ up_read(&mm->mmap_sem); -+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - goto bad_area; -+ } - - /* - * If for any reason at all we couldn't handle the fault, make -diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c -index 76069c1..c2aa816 100644 ---- a/arch/ia64/mm/hugetlbpage.c -+++ b/arch/ia64/mm/hugetlbpage.c -@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u - unsigned long pgoff, unsigned long flags) - { - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags); - - if (len > RGN_MAP_LIMIT) - return -ENOMEM; -@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u - info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT; - info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1); - info.align_offset = 0; -+ info.threadstack_offset = offset; - return vm_unmapped_area(&info); - } - -diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c -index 25c3502..560dae7 100644 ---- a/arch/ia64/mm/init.c -+++ b/arch/ia64/mm/init.c -@@ -120,6 +120,19 @@ ia64_init_addr_space (void) - vma->vm_start = current->thread.rbs_bot & PAGE_MASK; - vma->vm_end = vma->vm_start + PAGE_SIZE; - vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { -+ vma->vm_flags &= ~VM_EXEC; -+ -+#ifdef CONFIG_PAX_MPROTECT -+ if (current->mm->pax_flags & MF_PAX_MPROTECT) -+ vma->vm_flags &= ~VM_MAYEXEC; -+#endif -+ -+ } -+#endif -+ - vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - down_write(¤t->mm->mmap_sem); - if (insert_vm_struct(current->mm, vma)) { -diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h -index 40b3ee98..8c2c112 100644 ---- a/arch/m32r/include/asm/cache.h -+++ b/arch/m32r/include/asm/cache.h -@@ -1,8 +1,10 @@ - #ifndef _ASM_M32R_CACHE_H - #define _ASM_M32R_CACHE_H - -+#include <linux/const.h> -+ - /* L1 cache line size */ - #define 
L1_CACHE_SHIFT 4 --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #endif /* _ASM_M32R_CACHE_H */ -diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c -index 82abd15..d95ae5d 100644 ---- a/arch/m32r/lib/usercopy.c -+++ b/arch/m32r/lib/usercopy.c -@@ -14,6 +14,9 @@ - unsigned long - __generic_copy_to_user(void __user *to, const void *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - prefetch(from); - if (access_ok(VERIFY_WRITE, to, n)) - __copy_user(to,from,n); -@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) - unsigned long - __generic_copy_from_user(void *to, const void __user *from, unsigned long n) - { -+ if ((long)n < 0) -+ return n; -+ - prefetchw(to); - if (access_ok(VERIFY_READ, from, n)) - __copy_user_zeroing(to,from,n); -diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h -index 0395c51..5f26031 100644 ---- a/arch/m68k/include/asm/cache.h -+++ b/arch/m68k/include/asm/cache.h -@@ -4,9 +4,11 @@ - #ifndef __ARCH_M68K_CACHE_H - #define __ARCH_M68K_CACHE_H - -+#include <linux/const.h> -+ - /* bytes per L1 cache line */ - #define L1_CACHE_SHIFT 4 --#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define ARCH_DMA_MINALIGN L1_CACHE_BYTES - -diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h -index 2d6f0de..de5f5ac 100644 ---- a/arch/metag/include/asm/barrier.h -+++ b/arch/metag/include/asm/barrier.h -@@ -89,7 +89,7 @@ static inline void fence(void) - do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c -index 3c52fa6..11b2ad8 100644 ---- a/arch/metag/mm/hugetlbpage.c -+++ b/arch/metag/mm/hugetlbpage.c -@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len) - info.high_limit = TASK_SIZE; - info.align_mask = PAGE_MASK & HUGEPT_MASK; - info.align_offset = 0; -+ info.threadstack_offset = 0; - return vm_unmapped_area(&info); - } - -diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h -index 4efe96a..60e8699 100644 ---- a/arch/microblaze/include/asm/cache.h -+++ b/arch/microblaze/include/asm/cache.h -@@ -13,11 +13,12 @@ - #ifndef _ASM_MICROBLAZE_CACHE_H - #define _ASM_MICROBLAZE_CACHE_H - -+#include <linux/const.h> - #include <asm/registers.h> - - #define L1_CACHE_SHIFT 5 - /* word-granular cache in microblaze */ --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define SMP_CACHE_BYTES L1_CACHE_BYTES - -diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig -index 95fa1f1..56a6fa2 100644 ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -2298,6 +2298,7 @@ source "kernel/Kconfig.preempt" - - config KEXEC - bool "Kexec system call" -+ depends on !GRKERNSEC_KMEM - help - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. 
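/*
 * Illustrative sketch, not part of the patch: the usercopy hunks above
 * (ia64 __copy_to_user/__copy_from_user, m32r __generic_copy_*_user)
 * all reject implausible lengths before touching memory, because a
 * size_t that is negative when read as signed, or larger than INT_MAX,
 * is almost always an underflowed "len - header" computation under
 * attacker control. A minimal userland model of the check, using the
 * hypothetical helper name checked_copy() and the kernel convention of
 * returning the number of bytes NOT copied:
 */
#include <limits.h>
#include <stddef.h>
#include <string.h>

static size_t checked_copy(void *to, const void *from, size_t n)
{
	if ((long)n < 0 || n > (size_t)INT_MAX)
		return n;	/* refuse: report every byte as uncopied */
	memcpy(to, from, n);
	return 0;		/* success: 0 bytes left uncopied */
}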
It is like a reboot -diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c -index 02f2444..506969c 100644 ---- a/arch/mips/cavium-octeon/dma-octeon.c -+++ b/arch/mips/cavium-octeon/dma-octeon.c -@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size, - if (dma_release_from_coherent(dev, order, vaddr)) - return; - -- swiotlb_free_coherent(dev, size, vaddr, dma_handle); -+ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs); - } - - static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr) -diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h -index 7eed2f2..c4e385d 100644 ---- a/arch/mips/include/asm/atomic.h -+++ b/arch/mips/include/asm/atomic.h -@@ -21,15 +21,39 @@ - #include <asm/cmpxchg.h> - #include <asm/war.h> - -+#ifdef CONFIG_GENERIC_ATOMIC64 -+#include <asm-generic/atomic64.h> -+#endif -+ - #define ATOMIC_INIT(i) { (i) } - -+#ifdef CONFIG_64BIT -+#define _ASM_EXTABLE(from, to) \ -+" .section __ex_table,\"a\"\n" \ -+" .dword " #from ", " #to"\n" \ -+" .previous\n" -+#else -+#define _ASM_EXTABLE(from, to) \ -+" .section __ex_table,\"a\"\n" \ -+" .word " #from ", " #to"\n" \ -+" .previous\n" -+#endif -+ - /* - * atomic_read - read atomic variable - * @v: pointer of type atomic_t - * - * Atomically reads the value of @v. - */ --#define atomic_read(v) (*(volatile int *)&(v)->counter) -+static inline int atomic_read(const atomic_t *v) -+{ -+ return (*(volatile const int *) &v->counter); -+} -+ -+static inline int atomic_read_unchecked(const atomic_unchecked_t *v) -+{ -+ return (*(volatile const int *) &v->counter); -+} - - /* - * atomic_set - set atomic variable -@@ -38,7 +62,15 @@ - * - * Atomically sets the value of @v to @i. - */ --#define atomic_set(v, i) ((v)->counter = (i)) -+static inline void atomic_set(atomic_t *v, int i) -+{ -+ v->counter = i; -+} -+ -+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) -+{ -+ v->counter = i; -+} - - /* - * atomic_add - add integer to atomic variable -@@ -47,7 +79,67 @@ - * - * Atomically adds @i to @v. - */ --static __inline__ void atomic_add(int i, atomic_t * v) -+static __inline__ void atomic_add(int i, atomic_t *v) -+{ -+ int temp; -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %0, %1 # atomic_add \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: add %0, %2 \n" -+#else -+ " addu %0, %2 \n" -+#endif -+ " sc %0, %1 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %0, %1 # atomic_add \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: add %0, %2 \n" -+#else -+ " addu %0, %2 \n" -+#endif -+ " sc %0, %1 \n" -+ " beqz %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. 
*/ -+ "1: add %0, %1 \n" -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#else -+ " addu %0, %1 \n" -+#endif -+ : "+r" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+} -+ -+static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v) - { - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; -@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v) - * - * Atomically subtracts @i from @v. - */ --static __inline__ void atomic_sub(int i, atomic_t * v) -+static __inline__ void atomic_sub(int i, atomic_t *v) -+{ -+ int temp; -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %0, %1 # atomic64_sub \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: sub %0, %2 \n" -+#else -+ " subu %0, %2 \n" -+#endif -+ " sc %0, %1 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %0, %1 # atomic64_sub \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: sub %0, %2 \n" -+#else -+ " subu %0, %2 \n" -+#endif -+ " sc %0, %1 \n" -+ " beqz %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "1: sub %0, %1 \n" -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#else -+ " subu %0, %1 \n" -+#endif -+ : "+r" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+} -+ -+static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v) - { - if (kernel_uses_llsc && R10000_LLSC_WAR) { - int temp; -@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v) - /* - * Same as above, but return the result value - */ --static __inline__ int atomic_add_return(int i, atomic_t * v) -+static __inline__ int atomic_add_return(int i, atomic_t *v) -+{ -+ int result; -+ int temp; -+ -+ smp_mb__before_llsc(); -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %1, %2 # atomic_add_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: add %0, %1, %3 \n" -+#else -+ " addu %0, %1, %3 \n" -+#endif -+ " sc %0, %2 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " b 4f \n" -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: addu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %1, %2 # atomic_add_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: add %0, %1, %3 \n" -+#else -+ " addu %0, %1, %3 \n" -+#endif -+ " sc %0, %2 \n" -+ " bnez %0, 4f \n" -+ " b 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: addu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+ " lw %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception 
on overflow. */ -+ "1: add %0, %2 \n" -+#else -+ " addu %0, %2 \n" -+#endif -+ " sw %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Note: Dest reg is not modified on overflow */ -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#endif -+ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+ -+ smp_llsc_mb(); -+ -+ return result; -+} -+ -+static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) - { - int result; - -@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) - return result; - } - --static __inline__ int atomic_sub_return(int i, atomic_t * v) -+static __inline__ int atomic_sub_return(int i, atomic_t *v) -+{ -+ int result; -+ int temp; -+ -+ smp_mb__before_llsc(); -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %1, %2 # atomic_sub_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: sub %0, %1, %3 \n" -+#else -+ " subu %0, %1, %3 \n" -+#endif -+ " sc %0, %2 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " b 4f \n" -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: subu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) -+ : "Ir" (i), "m" (v->counter) -+ : "memory"); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: ll %1, %2 # atomic_sub_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: sub %0, %1, %3 \n" -+#else -+ " subu %0, %1, %3 \n" -+#endif -+ " sc %0, %2 \n" -+ " bnez %0, 4f \n" -+ " b 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: subu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+ " lw %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "1: sub %0, %2 \n" -+#else -+ " subu %0, %2 \n" -+#endif -+ " sw %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Note: Dest reg is not modified on overflow */ -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#endif -+ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+ -+ smp_llsc_mb(); -+ -+ return result; -+} -+static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v) - { - int result; - -@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) - * Atomically test @v and subtract @i if @v is greater or equal than @i. - * The function returns the old value of @v minus @i. 
- */ --static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) -+static __inline__ int atomic_sub_if_positive(int i, atomic_t *v) - { - int result; - -@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) - return result; - } - --#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) --#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) -+static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} -+ -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, -+ int new) -+{ -+ return cmpxchg(&(v->counter), old, new); -+} -+ -+static inline int atomic_xchg(atomic_t *v, int new) -+{ -+ return xchg(&v->counter, new); -+} -+ -+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) -+{ -+ return xchg(&(v->counter), new); -+} - - /** - * __atomic_add_unless - add unless the number is a given value -@@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - - #define atomic_dec_return(v) atomic_sub_return(1, (v)) - #define atomic_inc_return(v) atomic_add_return(1, (v)) -+static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v); -+} - - /* - * atomic_sub_and_test - subtract value from variable and test result -@@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - * other cases. - */ - #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) -+static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v) == 0; -+} - - /* - * atomic_dec_and_test - decrement by 1 and test -@@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - * Atomically increments @v by 1. - */ - #define atomic_inc(v) atomic_add(1, (v)) -+static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_add_unchecked(1, v); -+} - - /* - * atomic_dec - decrement and test -@@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - * Atomically decrements @v by 1. - */ - #define atomic_dec(v) atomic_sub(1, (v)) -+static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v) -+{ -+ atomic_sub_unchecked(1, v); -+} - - /* - * atomic_add_negative - add and test if negative -@@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - * @v: pointer of type atomic64_t - * - */ --#define atomic64_read(v) (*(volatile long *)&(v)->counter) -+static inline long atomic64_read(const atomic64_t *v) -+{ -+ return (*(volatile const long *) &v->counter); -+} -+ -+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ return (*(volatile const long *) &v->counter); -+} - - /* - * atomic64_set - set atomic variable - * @v: pointer of type atomic64_t - * @i: required value - */ --#define atomic64_set(v, i) ((v)->counter = (i)) -+static inline void atomic64_set(atomic64_t *v, long i) -+{ -+ v->counter = i; -+} -+ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) -+{ -+ v->counter = i; -+} - - /* - * atomic64_add - add integer to atomic variable -@@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) - * - * Atomically adds @i to @v. 
- */ --static __inline__ void atomic64_add(long i, atomic64_t * v) -+static __inline__ void atomic64_add(long i, atomic64_t *v) -+{ -+ long temp; -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %0, %1 # atomic64_add \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: dadd %0, %2 \n" -+#else -+ " daddu %0, %2 \n" -+#endif -+ " scd %0, %1 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %0, %1 # atomic64_add \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: dadd %0, %2 \n" -+#else -+ " daddu %0, %2 \n" -+#endif -+ " scd %0, %1 \n" -+ " beqz %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "1: dadd %0, %1 \n" -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#else -+ " daddu %0, %1 \n" -+#endif -+ : "+r" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+} -+static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) - { - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; -@@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) - * - * Atomically subtracts @i from @v. - */ --static __inline__ void atomic64_sub(long i, atomic64_t * v) -+static __inline__ void atomic64_sub(long i, atomic64_t *v) -+{ -+ long temp; -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %0, %1 # atomic64_sub \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: dsub %0, %2 \n" -+#else -+ " dsubu %0, %2 \n" -+#endif -+ " scd %0, %1 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %0, %1 # atomic64_sub \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "2: dsub %0, %2 \n" -+#else -+ " dsubu %0, %2 \n" -+#endif -+ " scd %0, %1 \n" -+ " beqz %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "3: \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ " .set mips0 \n" -+ : "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. 
*/ -+ "1: dsub %0, %1 \n" -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#else -+ " dsubu %0, %1 \n" -+#endif -+ : "+r" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+} -+ -+static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) - { - if (kernel_uses_llsc && R10000_LLSC_WAR) { - long temp; -@@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) - /* - * Same as above, but return the result value - */ --static __inline__ long atomic64_add_return(long i, atomic64_t * v) -+static __inline__ long atomic64_add_return(long i, atomic64_t *v) -+{ -+ long result; -+ long temp; -+ -+ smp_mb__before_llsc(); -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %1, %2 # atomic64_add_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: dadd %0, %1, %3 \n" -+#else -+ " daddu %0, %1, %3 \n" -+#endif -+ " scd %0, %2 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " b 4f \n" -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: daddu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "+m" (v->counter) -+ : "Ir" (i)); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %1, %2 # atomic64_add_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: dadd %0, %1, %3 \n" -+#else -+ " daddu %0, %1, %3 \n" -+#endif -+ " scd %0, %2 \n" -+ " bnez %0, 4f \n" -+ " b 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: daddu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) -+ : "Ir" (i), "m" (v->counter) -+ : "memory"); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+ " ld %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. 
*/ -+ "1: dadd %0, %2 \n" -+#else -+ " daddu %0, %2 \n" -+#endif -+ " sd %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Note: Dest reg is not modified on overflow */ -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#endif -+ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+ -+ smp_llsc_mb(); -+ -+ return result; -+} -+static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) - { - long result; - -@@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) - return result; - } - --static __inline__ long atomic64_sub_return(long i, atomic64_t * v) -+static __inline__ long atomic64_sub_return(long i, atomic64_t *v) -+{ -+ long result; -+ long temp; -+ -+ smp_mb__before_llsc(); -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ long temp; -+ -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %1, %2 # atomic64_sub_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: dsub %0, %1, %3 \n" -+#else -+ " dsubu %0, %1, %3 \n" -+#endif -+ " scd %0, %2 \n" -+ " beqzl %0, 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " b 4f \n" -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: dsubu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) -+ : "Ir" (i), "m" (v->counter) -+ : "memory"); -+ } else if (kernel_uses_llsc) { -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1: lld %1, %2 # atomic64_sub_return \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "2: dsub %0, %1, %3 \n" -+#else -+ " dsubu %0, %1, %3 \n" -+#endif -+ " scd %0, %2 \n" -+ " bnez %0, 4f \n" -+ " b 1b \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ " .set noreorder \n" -+ "3: b 5f \n" -+ " move %0, %1 \n" -+ " .set reorder \n" -+ _ASM_EXTABLE(2b, 3b) -+#endif -+ "4: dsubu %0, %1, %3 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ "5: \n" -+#endif -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (v->counter) -+ : "Ir" (i), "m" (v->counter) -+ : "memory"); -+ } else { -+ unsigned long flags; -+ -+ raw_local_irq_save(flags); -+ __asm__ __volatile__( -+ " ld %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Exception on overflow. */ -+ "1: dsub %0, %2 \n" -+#else -+ " dsubu %0, %2 \n" -+#endif -+ " sd %0, %1 \n" -+#ifdef CONFIG_PAX_REFCOUNT -+ /* Note: Dest reg is not modified on overflow */ -+ "2: \n" -+ _ASM_EXTABLE(1b, 2b) -+#endif -+ : "=&r" (result), "+m" (v->counter) : "Ir" (i)); -+ raw_local_irq_restore(flags); -+ } -+ -+ smp_llsc_mb(); -+ -+ return result; -+} -+ -+static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v) - { - long result; - -@@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) - * Atomically test @v and subtract @i if @v is greater or equal than @i. - * The function returns the old value of @v minus @i. 
- */ --static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) -+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v) - { - long result; - -@@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) - return result; - } - --#define atomic64_cmpxchg(v, o, n) \ -- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) --#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new))) -+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} -+ -+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, -+ long new) -+{ -+ return cmpxchg(&(v->counter), old, new); -+} -+ -+static inline long atomic64_xchg(atomic64_t *v, long new) -+{ -+ return xchg(&v->counter, new); -+} -+ -+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new) -+{ -+ return xchg(&(v->counter), new); -+} - - /** - * atomic64_add_unless - add unless the number is a given value -@@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) - - #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) - #define atomic64_inc_return(v) atomic64_add_return(1, (v)) -+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v)) - - /* - * atomic64_sub_and_test - subtract value from variable and test result -@@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) - * other cases. - */ - #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -+#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0) - - /* - * atomic64_dec_and_test - decrement by 1 and test -@@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) - * Atomically increments @v by 1. - */ - #define atomic64_inc(v) atomic64_add(1, (v)) -+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v)) - - /* - * atomic64_dec - decrement and test -@@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) - * Atomically decrements @v by 1. 
- */ - #define atomic64_dec(v) atomic64_sub(1, (v)) -+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v)) - - /* - * atomic64_add_negative - add and test if negative -diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h -index e1aa4e4..670b68b 100644 ---- a/arch/mips/include/asm/barrier.h -+++ b/arch/mips/include/asm/barrier.h -@@ -184,7 +184,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h -index b4db69f..8f3b093 100644 ---- a/arch/mips/include/asm/cache.h -+++ b/arch/mips/include/asm/cache.h -@@ -9,10 +9,11 @@ - #ifndef _ASM_CACHE_H - #define _ASM_CACHE_H - -+#include <linux/const.h> - #include <kmalloc.h> - - #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define SMP_CACHE_SHIFT L1_CACHE_SHIFT - #define SMP_CACHE_BYTES L1_CACHE_BYTES -diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h -index d414405..6bb4ba2 100644 ---- a/arch/mips/include/asm/elf.h -+++ b/arch/mips/include/asm/elf.h -@@ -398,13 +398,16 @@ extern const char *__elf_platform; - #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) - #endif - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 - struct linux_binprm; - extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int uses_interp); - --struct mm_struct; --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- - #endif /* _ASM_ELF_H */ -diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h -index c1f6afa..38cc6e9 100644 ---- a/arch/mips/include/asm/exec.h -+++ b/arch/mips/include/asm/exec.h -@@ -12,6 +12,6 @@ - #ifndef _ASM_EXEC_H - #define _ASM_EXEC_H - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - #endif /* _ASM_EXEC_H */ -diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h -index 9e8ef59..1139d6b 100644 ---- a/arch/mips/include/asm/hw_irq.h -+++ b/arch/mips/include/asm/hw_irq.h -@@ -10,7 +10,7 @@ - - #include <linux/atomic.h> - --extern atomic_t irq_err_count; -+extern atomic_unchecked_t irq_err_count; - - /* - * interrupt-retrigger: NOP for now. 
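/*
 * Illustrative sketch, not part of the patch: the atomic_*/atomic64_*
 * hunks above implement the PAX_REFCOUNT split. The checked variants
 * use the trapping MIPS "add"/"dadd" opcodes plus an _ASM_EXTABLE
 * fixup so a signed overflow raises an exception instead of silently
 * wrapping a reference count, while the *_unchecked variants keep
 * ordinary wrapping arithmetic for pure statistics such as
 * irq_err_count. The same split, modelled portably with a GCC/Clang
 * builtin (function names here are hypothetical):
 */
#include <stdatomic.h>
#include <stdlib.h>

static int refcount_inc_checked(_Atomic int *v)
{
	int old = atomic_load(v);
	int new;

	do {
		/* trap (here: abort) rather than wrap past INT_MAX */
		if (__builtin_add_overflow(old, 1, &new))
			abort();
	} while (!atomic_compare_exchange_weak(v, &old, new));
	return new;
}

static int stat_inc_unchecked(_Atomic int *v)
{
	/* plain wrapping add, the *_unchecked behaviour */
	return atomic_fetch_add(v, 1) + 1;
}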
This may not be appropriate for all -diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h -index d44622c..64990d2 100644 ---- a/arch/mips/include/asm/local.h -+++ b/arch/mips/include/asm/local.h -@@ -12,15 +12,25 @@ typedef struct - atomic_long_t a; - } local_t; - -+typedef struct { -+ atomic_long_unchecked_t a; -+} local_unchecked_t; -+ - #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } - - #define local_read(l) atomic_long_read(&(l)->a) -+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) - #define local_set(l, i) atomic_long_set(&(l)->a, (i)) -+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i)) - - #define local_add(i, l) atomic_long_add((i), (&(l)->a)) -+#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a)) - #define local_sub(i, l) atomic_long_sub((i), (&(l)->a)) -+#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a)) - #define local_inc(l) atomic_long_inc(&(l)->a) -+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) - #define local_dec(l) atomic_long_dec(&(l)->a) -+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) - - /* - * Same as above, but return the result value -@@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l) - return result; - } - -+static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l) -+{ -+ unsigned long result; -+ -+ if (kernel_uses_llsc && R10000_LLSC_WAR) { -+ unsigned long temp; -+ -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1:" __LL "%1, %2 # local_add_return \n" -+ " addu %0, %1, %3 \n" -+ __SC "%0, %2 \n" -+ " beqzl %0, 1b \n" -+ " addu %0, %1, %3 \n" -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) -+ : "Ir" (i), "m" (l->a.counter) -+ : "memory"); -+ } else if (kernel_uses_llsc) { -+ unsigned long temp; -+ -+ __asm__ __volatile__( -+ " .set mips3 \n" -+ "1:" __LL "%1, %2 # local_add_return \n" -+ " addu %0, %1, %3 \n" -+ __SC "%0, %2 \n" -+ " beqz %0, 1b \n" -+ " addu %0, %1, %3 \n" -+ " .set mips0 \n" -+ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter) -+ : "Ir" (i), "m" (l->a.counter) -+ : "memory"); -+ } else { -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ result = l->a.counter; -+ result += i; -+ l->a.counter = result; -+ local_irq_restore(flags); -+ } -+ -+ return result; -+} -+ - static __inline__ long local_sub_return(long i, local_t * l) - { - unsigned long result; -@@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l) - - #define local_cmpxchg(l, o, n) \ - ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) -+#define local_cmpxchg_unchecked(l, o, n) \ -+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n))) - #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n))) - - /** -diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h -index 5e08bcc..cfedefc 100644 ---- a/arch/mips/include/asm/page.h -+++ b/arch/mips/include/asm/page.h -@@ -120,7 +120,7 @@ extern void copy_user_highpage(struct page *to, struct page *from, - #ifdef CONFIG_CPU_MIPS32 - typedef struct { unsigned long pte_low, pte_high; } pte_t; - #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) -- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) -+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) - #else - typedef struct { unsigned long long pte; } pte_t; - #define pte_val(x) ((x).pte) -diff --git a/arch/mips/include/asm/pgalloc.h 
b/arch/mips/include/asm/pgalloc.h -index b336037..5b874cc 100644 ---- a/arch/mips/include/asm/pgalloc.h -+++ b/arch/mips/include/asm/pgalloc.h -@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - { - set_pud(pud, __pud((unsigned long)pmd)); - } -+ -+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) -+{ -+ pud_populate(mm, pud, pmd); -+} - #endif - - /* -diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h -index b154953..f5e6871 100644 ---- a/arch/mips/include/asm/pgtable.h -+++ b/arch/mips/include/asm/pgtable.h -@@ -20,6 +20,9 @@ - #include <asm/io.h> - #include <asm/pgtable-bits.h> - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - struct mm_struct; - struct vm_area_struct; - -diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h -index 25da651..ae2a259 100644 ---- a/arch/mips/include/asm/smtc_proc.h -+++ b/arch/mips/include/asm/smtc_proc.h -@@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; - - /* Count of number of recoveries of "stolen" FPU access rights on 34K */ - --extern atomic_t smtc_fpu_recoveries; -+extern atomic_unchecked_t smtc_fpu_recoveries; - - #endif /* __ASM_SMTC_PROC_H */ -diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h -index e80ae50..b93dd2e 100644 ---- a/arch/mips/include/asm/thread_info.h -+++ b/arch/mips/include/asm/thread_info.h -@@ -105,6 +105,8 @@ static inline struct thread_info *current_thread_info(void) - #define TIF_SECCOMP 4 /* secure computing */ - #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */ - #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */ -+/* li takes a 32bit immediate */ -+#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */ - #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ - #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ - #define TIF_NOHZ 19 /* in adaptive nohz mode */ -@@ -134,14 +136,15 @@ static inline struct thread_info *current_thread_info(void) - #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) - #define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS) - #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) -+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) - - #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ - _TIF_SYSCALL_AUDIT | \ -- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) -+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID) - - /* work to do in syscall_trace_leave() */ - #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ -- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) -+ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) - - /* work to do on interrupt/exception return */ - #define _TIF_WORK_MASK \ -@@ -149,7 +152,7 @@ static inline struct thread_info *current_thread_info(void) - /* work to do on any return to u-space */ - #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \ - _TIF_WORK_SYSCALL_EXIT | \ -- _TIF_SYSCALL_TRACEPOINT) -+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID) - - /* - * We stash processor id into a COP0 register to retrieve it fast -diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h -index f3fa375..3af6637 100644 ---- a/arch/mips/include/asm/uaccess.h -+++ b/arch/mips/include/asm/uaccess.h -@@ -128,6 +128,7 @@ extern u64 __ua_limit; - __ok == 0; \ - }) - -+#define access_ok_noprefault(type, addr, size) 
access_ok((type), (addr), (size)) - #define access_ok(type, addr, size) \ - likely(__access_ok((addr), (size), __access_mask)) - -diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c -index 1188e00..41cf144 100644 ---- a/arch/mips/kernel/binfmt_elfn32.c -+++ b/arch/mips/kernel/binfmt_elfn32.c -@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - #undef ELF_ET_DYN_BASE - #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #include <asm/processor.h> - #include <linux/module.h> - #include <linux/elfcore.h> -diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c -index 71df942..199dd19 100644 ---- a/arch/mips/kernel/binfmt_elfo32.c -+++ b/arch/mips/kernel/binfmt_elfo32.c -@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; - #undef ELF_ET_DYN_BASE - #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL) -+ -+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) -+#endif -+ - #include <asm/processor.h> - - /* These MUST be defined before elf.h gets included */ -diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c -index 2b91fe8..fe4f6b4 100644 ---- a/arch/mips/kernel/i8259.c -+++ b/arch/mips/kernel/i8259.c -@@ -205,7 +205,7 @@ spurious_8259A_irq: - printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq); - spurious_irq_mask |= irqmask; - } -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - /* - * Theoretically we do not have to handle this IRQ, - * but in Linux this does not cause problems and is -diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c -index 44a1f79..2bd6aa3 100644 ---- a/arch/mips/kernel/irq-gt641xx.c -+++ b/arch/mips/kernel/irq-gt641xx.c -@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void) - } - } - -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - } - - void __init gt641xx_irq_init(void) -diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c -index 7479d8d..5c37e62 100644 ---- a/arch/mips/kernel/irq.c -+++ b/arch/mips/kernel/irq.c -@@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq) - printk("unexpected IRQ # %d\n", irq); - } - --atomic_t irq_err_count; -+atomic_unchecked_t irq_err_count; - - int arch_show_interrupts(struct seq_file *p, int prec) - { -- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); -+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count)); - return 0; - } - - asmlinkage void spurious_interrupt(void) - { -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - } - - void __init init_IRQ(void) -@@ -111,6 +111,8 @@ void __init init_IRQ(void) - } - - #ifdef CONFIG_DEBUG_STACKOVERFLOW -+ -+extern void gr_handle_kernel_exploit(void); - static inline void check_stack_overflow(void) - { - unsigned long sp; -@@ -126,6 +128,7 @@ static inline void check_stack_overflow(void) - printk("do_IRQ: stack overflow: %ld\n", - sp - sizeof(struct thread_info)); - dump_stack(); -+ gr_handle_kernel_exploit(); - } - } - #else -diff --git 
a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c -index 6ae540e..b7396dc 100644 ---- a/arch/mips/kernel/process.c -+++ b/arch/mips/kernel/process.c -@@ -562,15 +562,3 @@ unsigned long get_wchan(struct task_struct *task) - out: - return pc; - } -- --/* -- * Don't forget that the stack pointer must be aligned on a 8 bytes -- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. -- */ --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() & ~PAGE_MASK; -- -- return sp & ALMASK; --} -diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c -index 60f48fe..a2df508 100644 ---- a/arch/mips/kernel/ptrace.c -+++ b/arch/mips/kernel/ptrace.c -@@ -790,6 +790,10 @@ long arch_ptrace(struct task_struct *child, long request, - return ret; - } - -+#ifdef CONFIG_GRKERNSEC_SETXID -+extern void gr_delayed_cred_worker(void); -+#endif -+ - /* - * Notification of system call entry/exit - * - triggered by current->work.syscall_trace -@@ -806,6 +810,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs) - tracehook_report_syscall_entry(regs)) - ret = -1; - -+#ifdef CONFIG_GRKERNSEC_SETXID -+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) -+ gr_delayed_cred_worker(); -+#endif -+ - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_enter(regs, regs->regs[2]); - -diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c -index 07fc524..b9d7f28 100644 ---- a/arch/mips/kernel/reset.c -+++ b/arch/mips/kernel/reset.c -@@ -13,6 +13,7 @@ - #include <linux/reboot.h> - - #include <asm/reboot.h> -+#include <asm/bug.h> - - /* - * Urgs ... Too many MIPS machines to handle this in a generic way. -@@ -29,16 +30,19 @@ void machine_restart(char *command) - { - if (_machine_restart) - _machine_restart(command); -+ BUG(); - } - - void machine_halt(void) - { - if (_machine_halt) - _machine_halt(); -+ BUG(); - } - - void machine_power_off(void) - { - if (pm_power_off) - pm_power_off(); -+ BUG(); - } -diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c -index c10aa84..9ec2e60 100644 ---- a/arch/mips/kernel/smtc-proc.c -+++ b/arch/mips/kernel/smtc-proc.c -@@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS]; - - struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS]; - --atomic_t smtc_fpu_recoveries; -+atomic_unchecked_t smtc_fpu_recoveries; - - static int smtc_proc_show(struct seq_file *m, void *v) - { -@@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v) - for(i = 0; i < NR_CPUS; i++) - seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis); - seq_printf(m, "%d Recoveries of \"stolen\" FPU\n", -- atomic_read(&smtc_fpu_recoveries)); -+ atomic_read_unchecked(&smtc_fpu_recoveries)); - return 0; - } - -@@ -73,7 +73,7 @@ void init_smtc_stats(void) - smtc_cpu_stats[i].selfipis = 0; - } - -- atomic_set(&smtc_fpu_recoveries, 0); -+ atomic_set_unchecked(&smtc_fpu_recoveries, 0); - - proc_create("smtc", 0444, NULL, &smtc_proc_fops); - } -diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c -index dfc1b91..11a2c07 100644 ---- a/arch/mips/kernel/smtc.c -+++ b/arch/mips/kernel/smtc.c -@@ -1359,7 +1359,7 @@ void smtc_soft_dump(void) - } - smtc_ipi_qdump(); - printk("%d Recoveries of \"stolen\" FPU\n", -- atomic_read(&smtc_fpu_recoveries)); -+ atomic_read_unchecked(&smtc_fpu_recoveries)); - } - - -diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c -index c24ad5f..9983ab2 100644 ---- a/arch/mips/kernel/sync-r4k.c -+++ 
b/arch/mips/kernel/sync-r4k.c -@@ -20,8 +20,8 @@ - #include <asm/mipsregs.h> - - static atomic_t count_start_flag = ATOMIC_INIT(0); --static atomic_t count_count_start = ATOMIC_INIT(0); --static atomic_t count_count_stop = ATOMIC_INIT(0); -+static atomic_unchecked_t count_count_start = ATOMIC_INIT(0); -+static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0); - static atomic_t count_reference = ATOMIC_INIT(0); - - #define COUNTON 100 -@@ -68,13 +68,13 @@ void synchronise_count_master(int cpu) - - for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= 2' */ -- while (atomic_read(&count_count_start) != 1) -+ while (atomic_read_unchecked(&count_count_start) != 1) - mb(); -- atomic_set(&count_count_stop, 0); -+ atomic_set_unchecked(&count_count_stop, 0); - smp_wmb(); - - /* this lets the slaves write their count register */ -- atomic_inc(&count_count_start); -+ atomic_inc_unchecked(&count_count_start); - - /* - * Everyone initialises count in the last loop: -@@ -85,11 +85,11 @@ void synchronise_count_master(int cpu) - /* - * Wait for all slaves to leave the synchronization point: - */ -- while (atomic_read(&count_count_stop) != 1) -+ while (atomic_read_unchecked(&count_count_stop) != 1) - mb(); -- atomic_set(&count_count_start, 0); -+ atomic_set_unchecked(&count_count_start, 0); - smp_wmb(); -- atomic_inc(&count_count_stop); -+ atomic_inc_unchecked(&count_count_stop); - } - /* Arrange for an interrupt in a short while */ - write_c0_compare(read_c0_count() + COUNTON); -@@ -130,8 +130,8 @@ void synchronise_count_slave(int cpu) - initcount = atomic_read(&count_reference); - - for (i = 0; i < NR_LOOPS; i++) { -- atomic_inc(&count_count_start); -- while (atomic_read(&count_count_start) != 2) -+ atomic_inc_unchecked(&count_count_start); -+ while (atomic_read_unchecked(&count_count_start) != 2) - mb(); - - /* -@@ -140,8 +140,8 @@ void synchronise_count_slave(int cpu) - if (i == NR_LOOPS-1) - write_c0_count(initcount); - -- atomic_inc(&count_count_stop); -- while (atomic_read(&count_count_stop) != 2) -+ atomic_inc_unchecked(&count_count_stop); -+ while (atomic_read_unchecked(&count_count_stop) != 2) - mb(); - } - /* Arrange for an interrupt in a short while */ -diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c -index 81e6ae0..6ab6e79 100644 ---- a/arch/mips/kernel/traps.c -+++ b/arch/mips/kernel/traps.c -@@ -691,7 +691,18 @@ asmlinkage void do_ov(struct pt_regs *regs) - siginfo_t info; - - prev_state = exception_enter(); -- die_if_kernel("Integer overflow", regs); -+ if (unlikely(!user_mode(regs))) { -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ if (fixup_exception(regs)) { -+ pax_report_refcount_overflow(regs); -+ exception_exit(prev_state); -+ return; -+ } -+#endif -+ -+ die("Integer overflow", regs); -+ } - - info.si_code = FPE_INTOVF; - info.si_signo = SIGFPE; -diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c -index 897c605..c421760 100644 ---- a/arch/mips/kvm/kvm_mips.c -+++ b/arch/mips/kvm/kvm_mips.c -@@ -835,7 +835,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) - return r; - } - --int kvm_arch_init(void *opaque) -+int kvm_arch_init(const void *opaque) - { - int ret; - -diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c -index 70ab5d6..62940fe 100644 ---- a/arch/mips/mm/fault.c -+++ b/arch/mips/mm/fault.c -@@ -28,6 +28,23 @@ - #include <asm/highmem.h> /* For VMALLOC_END */ - #include <linux/kdebug.h> - -+#ifdef CONFIG_PAX_PAGEEXEC -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ 
printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - /* - * This routine handles page faults. It determines the address, - * and the problem, and then passes it off to one of the appropriate -@@ -201,6 +218,14 @@ bad_area: - bad_area_nosemaphore: - /* User mode accesses just cause a SIGSEGV */ - if (user_mode(regs)) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) { -+ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs)); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - tsk->thread.cp0_badvaddr = address; - tsk->thread.error_code = write; - #if 0 -diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c -index f1baadd..5472dca 100644 ---- a/arch/mips/mm/mmap.c -+++ b/arch/mips/mm/mmap.c -@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - struct vm_area_struct *vma; - unsigned long addr = addr0; - int do_color_align; -+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags); - struct vm_unmapped_area_info info; - - if (unlikely(len > TASK_SIZE)) -@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - do_color_align = 1; - - /* requesting a specific address */ -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); -@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, - addr = PAGE_ALIGN(addr); - - vma = find_vma(mm, addr); -- if (TASK_SIZE - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) - return addr; - } - - info.length = len; - info.align_mask = do_color_align ? 
(PAGE_MASK & shm_align_mask) : 0; - info.align_offset = pgoff << PAGE_SHIFT; -+ info.threadstack_offset = offset; - - if (dir == DOWN) { - info.flags = VM_UNMAPPED_AREA_TOPDOWN; -@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - { - unsigned long random_factor = 0UL; - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (current->flags & PF_RANDOMIZE) { - random_factor = get_random_int(); - random_factor = random_factor << PAGE_SHIFT; -@@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - - if (mmap_is_legacy()) { - mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base += mm->delta_mmap; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area; - } else { - mm->mmap_base = mmap_base(random_factor); -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+#endif -+ - mm->get_unmapped_area = arch_get_unmapped_area_topdown; - } - } - --static inline unsigned long brk_rnd(void) --{ -- unsigned long rnd = get_random_int(); -- -- rnd = rnd << PAGE_SHIFT; -- /* 8MB for 32bit, 256MB for 64bit */ -- if (TASK_IS_32BIT_ADDR) -- rnd = rnd & 0x7ffffful; -- else -- rnd = rnd & 0xffffffful; -- -- return rnd; --} -- --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long base = mm->brk; -- unsigned long ret; -- -- ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < mm->brk) -- return mm->brk; -- -- return ret; --} -- - int __virt_addr_valid(const volatile void *kaddr) - { - return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); -diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c -index 59cccd9..f39ac2f 100644 ---- a/arch/mips/pci/pci-octeon.c -+++ b/arch/mips/pci/pci-octeon.c -@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, - - - static struct pci_ops octeon_pci_ops = { -- octeon_read_config, -- octeon_write_config, -+ .read = octeon_read_config, -+ .write = octeon_write_config, - }; - - static struct resource octeon_pci_mem_resource = { -diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c -index 5e36c33..eb4a17b 100644 ---- a/arch/mips/pci/pcie-octeon.c -+++ b/arch/mips/pci/pcie-octeon.c -@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, - } - - static struct pci_ops octeon_pcie0_ops = { -- octeon_pcie0_read_config, -- octeon_pcie0_write_config, -+ .read = octeon_pcie0_read_config, -+ .write = octeon_pcie0_write_config, - }; - - static struct resource octeon_pcie0_mem_resource = { -@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = { - }; - - static struct pci_ops octeon_pcie1_ops = { -- octeon_pcie1_read_config, -- octeon_pcie1_write_config, -+ .read = octeon_pcie1_read_config, -+ .write = octeon_pcie1_write_config, - }; - - static struct resource octeon_pcie1_mem_resource = { -@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = { - }; - - static struct pci_ops octeon_dummy_ops = { -- octeon_dummy_read_config, -- octeon_dummy_write_config, -+ .read = octeon_dummy_read_config, -+ .write = octeon_dummy_write_config, - }; - - static struct resource octeon_dummy_mem_resource = { -diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c -index a2358b4..7cead4f 100644 ---- a/arch/mips/sgi-ip27/ip27-nmi.c -+++ b/arch/mips/sgi-ip27/ip27-nmi.c -@@ -187,9 +187,9 @@ 
void - cont_nmi_dump(void) - { - #ifndef REAL_NMI_SIGNAL -- static atomic_t nmied_cpus = ATOMIC_INIT(0); -+ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0); - -- atomic_inc(&nmied_cpus); -+ atomic_inc_unchecked(&nmied_cpus); - #endif - /* - * Only allow 1 cpu to proceed -@@ -233,7 +233,7 @@ cont_nmi_dump(void) - udelay(10000); - } - #else -- while (atomic_read(&nmied_cpus) != num_online_cpus()); -+ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus()); - #endif - - /* -diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c -index a046b30..6799527 100644 ---- a/arch/mips/sni/rm200.c -+++ b/arch/mips/sni/rm200.c -@@ -270,7 +270,7 @@ spurious_8259A_irq: - "spurious RM200 8259A interrupt: IRQ%d.\n", irq); - spurious_irq_mask |= irqmask; - } -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - /* - * Theoretically we do not have to handle this IRQ, - * but in Linux this does not cause problems and is -diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c -index 41e873b..34d33a7 100644 ---- a/arch/mips/vr41xx/common/icu.c -+++ b/arch/mips/vr41xx/common/icu.c -@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq) - - printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2); - -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - - return -1; - } -diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c -index ae0e4ee..e8f0692 100644 ---- a/arch/mips/vr41xx/common/irq.c -+++ b/arch/mips/vr41xx/common/irq.c -@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq) - irq_cascade_t *cascade; - - if (irq >= NR_IRQS) { -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - return; - } - -@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq) - ret = cascade->get_irq(irq); - irq = ret; - if (ret < 0) -- atomic_inc(&irq_err_count); -+ atomic_inc_unchecked(&irq_err_count); - else - irq_dispatch(irq); - if (!irqd_irq_disabled(idata) && chip->irq_unmask) -diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h -index 967d144..db12197 100644 ---- a/arch/mn10300/proc-mn103e010/include/proc/cache.h -+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h -@@ -11,12 +11,14 @@ - #ifndef _ASM_PROC_CACHE_H - #define _ASM_PROC_CACHE_H - -+#include <linux/const.h> -+ - /* L1 cache */ - - #define L1_CACHE_NWAYS 4 /* number of ways in caches */ - #define L1_CACHE_NENTRIES 256 /* number of entries in each way */ --#define L1_CACHE_BYTES 16 /* bytes per entry */ - #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */ -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */ - #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */ - - #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ -diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h -index bcb5df2..84fabd2 100644 ---- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h -+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h -@@ -16,13 +16,15 @@ - #ifndef _ASM_PROC_CACHE_H - #define _ASM_PROC_CACHE_H - -+#include <linux/const.h> -+ - /* - * L1 cache - */ - #define L1_CACHE_NWAYS 4 /* number of ways in caches */ - #define L1_CACHE_NENTRIES 128 /* number of entries in each way */ --#define L1_CACHE_BYTES 32 /* bytes per entry */ - #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */ -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per 
entry */ - #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */ - - #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */ -diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h -index 4ce7a01..449202a 100644 ---- a/arch/openrisc/include/asm/cache.h -+++ b/arch/openrisc/include/asm/cache.h -@@ -19,11 +19,13 @@ - #ifndef __ASM_OPENRISC_CACHE_H - #define __ASM_OPENRISC_CACHE_H - -+#include <linux/const.h> -+ - /* FIXME: How can we replace these with values from the CPU... - * they shouldn't be hard-coded! - */ - --#define L1_CACHE_BYTES 16 - #define L1_CACHE_SHIFT 4 -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #endif /* __ASM_OPENRISC_CACHE_H */ -diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h -index 472886c..00e7df9 100644 ---- a/arch/parisc/include/asm/atomic.h -+++ b/arch/parisc/include/asm/atomic.h -@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v) - return dec; - } - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - #endif /* !CONFIG_64BIT */ - - -diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h -index 47f11c7..3420df2 100644 ---- a/arch/parisc/include/asm/cache.h -+++ b/arch/parisc/include/asm/cache.h -@@ -5,6 +5,7 @@ - #ifndef __ARCH_PARISC_CACHE_H - #define __ARCH_PARISC_CACHE_H - -+#include <linux/const.h> - - /* - * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have -@@ -15,13 +16,13 @@ - * just ruin performance. - */ - #ifdef CONFIG_PA20 --#define L1_CACHE_BYTES 64 - #define L1_CACHE_SHIFT 6 - #else --#define L1_CACHE_BYTES 32 - #define L1_CACHE_SHIFT 5 - #endif - -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) -+ - #ifndef __ASSEMBLY__ - - #define SMP_CACHE_BYTES L1_CACHE_BYTES -diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h -index 3391d06..c23a2cc 100644 ---- a/arch/parisc/include/asm/elf.h -+++ b/arch/parisc/include/asm/elf.h -@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */ - - #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) - -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE 0x10000UL -+ -+#define PAX_DELTA_MMAP_LEN 16 -+#define PAX_DELTA_STACK_LEN 16 -+#endif -+ - /* This yields a mask that user programs can use to figure out what - instruction set this CPU supports. This could be done in user space, - but it's not easy, and we've already done it here. 
*/ -diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h -index f213f5b..0af3e8e 100644 ---- a/arch/parisc/include/asm/pgalloc.h -+++ b/arch/parisc/include/asm/pgalloc.h -@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) - (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)); - } - -+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) -+{ -+ pgd_populate(mm, pgd, pmd); -+} -+ - static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) - { - pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, -@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) - #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) - #define pmd_free(mm, x) do { } while (0) - #define pgd_populate(mm, pmd, pte) BUG() -+#define pgd_populate_kernel(mm, pmd, pte) BUG() - - #endif - -diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h -index 22b89d1..ce34230 100644 ---- a/arch/parisc/include/asm/pgtable.h -+++ b/arch/parisc/include/asm/pgtable.h -@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); - #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) - #define PAGE_COPY PAGE_EXECREAD - #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) -+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) -+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) -+#else -+# define PAGE_SHARED_NOEXEC PAGE_SHARED -+# define PAGE_COPY_NOEXEC PAGE_COPY -+# define PAGE_READONLY_NOEXEC PAGE_READONLY -+#endif -+ - #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) - #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) - #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) -diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h -index 4006964..fcb3cc2 100644 ---- a/arch/parisc/include/asm/uaccess.h -+++ b/arch/parisc/include/asm/uaccess.h -@@ -246,10 +246,10 @@ static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) - { -- int sz = __compiletime_object_size(to); -+ size_t sz = __compiletime_object_size(to); - int ret = -EFAULT; - -- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n)) -+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n)) - ret = __copy_from_user(to, from, n); - else - copy_from_user_overflow(); -diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c -index 50dfafc..b9fc230 100644 ---- a/arch/parisc/kernel/module.c -+++ b/arch/parisc/kernel/module.c -@@ -98,16 +98,38 @@ - - /* three functions to determine where in the module core - * or init pieces the location is */ -+static inline int in_init_rx(struct module *me, void *loc) -+{ -+ return (loc >= me->module_init_rx && -+ loc < (me->module_init_rx + me->init_size_rx)); -+} -+ -+static inline int in_init_rw(struct module *me, void *loc) -+{ -+ return (loc >= me->module_init_rw && -+ loc < (me->module_init_rw + me->init_size_rw)); -+} -+ - static inline int in_init(struct module *me, void *loc) - { -- return (loc >= me->module_init && -- loc <= (me->module_init + me->init_size)); -+ return in_init_rx(me, 
loc) || in_init_rw(me, loc); -+} -+ -+static inline int in_core_rx(struct module *me, void *loc) -+{ -+ return (loc >= me->module_core_rx && -+ loc < (me->module_core_rx + me->core_size_rx)); -+} -+ -+static inline int in_core_rw(struct module *me, void *loc) -+{ -+ return (loc >= me->module_core_rw && -+ loc < (me->module_core_rw + me->core_size_rw)); - } - - static inline int in_core(struct module *me, void *loc) - { -- return (loc >= me->module_core && -- loc <= (me->module_core + me->core_size)); -+ return in_core_rx(me, loc) || in_core_rw(me, loc); - } - - static inline int in_local(struct module *me, void *loc) -@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, - } - - /* align things a bit */ -- me->core_size = ALIGN(me->core_size, 16); -- me->arch.got_offset = me->core_size; -- me->core_size += gots * sizeof(struct got_entry); -+ me->core_size_rw = ALIGN(me->core_size_rw, 16); -+ me->arch.got_offset = me->core_size_rw; -+ me->core_size_rw += gots * sizeof(struct got_entry); - -- me->core_size = ALIGN(me->core_size, 16); -- me->arch.fdesc_offset = me->core_size; -- me->core_size += fdescs * sizeof(Elf_Fdesc); -+ me->core_size_rw = ALIGN(me->core_size_rw, 16); -+ me->arch.fdesc_offset = me->core_size_rw; -+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); - - me->arch.got_max = gots; - me->arch.fdesc_max = fdescs; -@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) - - BUG_ON(value == 0); - -- got = me->module_core + me->arch.got_offset; -+ got = me->module_core_rw + me->arch.got_offset; - for (i = 0; got[i].addr; i++) - if (got[i].addr == value) - goto out; -@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) - #ifdef CONFIG_64BIT - static Elf_Addr get_fdesc(struct module *me, unsigned long value) - { -- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; -+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; - - if (!value) { - printk(KERN_ERR "%s: zero OPD requested!\n", me->name); -@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) - - /* Create new one */ - fdesc->addr = value; -- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; -+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; - return (Elf_Addr)fdesc; - } - #endif /* CONFIG_64BIT */ -@@ -843,7 +865,7 @@ register_unwind_table(struct module *me, - - table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; - end = table + sechdrs[me->arch.unwind_section].sh_size; -- gp = (Elf_Addr)me->module_core + me->arch.got_offset; -+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; - - DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", - me->arch.unwind_section, table, end, gp); -diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index e1ffea2..46ed66e 100644 ---- a/arch/parisc/kernel/sys_parisc.c -+++ b/arch/parisc/kernel/sys_parisc.c -@@ -89,6 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long task_size = TASK_SIZE; - int do_color_align, last_mmap; - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); - - if (len > task_size) - return -ENOMEM; -@@ -106,6 +107,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - goto found_addr; - } - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if 
(do_color_align && last_mmap) - addr = COLOR_ALIGN(addr, last_mmap, pgoff); -@@ -124,6 +129,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, - info.high_limit = mmap_upper_limit(); - info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; - info.align_offset = shared_align_offset(last_mmap, pgoff); -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - - found_addr: -@@ -143,6 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - unsigned long addr = addr0; - int do_color_align, last_mmap; - struct vm_unmapped_area_info info; -+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags); - - #ifdef CONFIG_64BIT - /* This should only ever run for 32-bit processes. */ -@@ -167,6 +174,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - } - - /* requesting a specific address */ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - if (do_color_align && last_mmap) - addr = COLOR_ALIGN(addr, last_mmap, pgoff); -@@ -184,6 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - info.high_limit = mm->mmap_base; - info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0; - info.align_offset = shared_align_offset(last_mmap, pgoff); -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - if (!(addr & ~PAGE_MASK)) - goto found_addr; -@@ -249,6 +261,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm) - mm->mmap_legacy_base = mmap_legacy_base(); - mm->mmap_base = mmap_upper_limit(); - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) { -+ mm->mmap_legacy_base += mm->delta_mmap; -+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; -+ } -+#endif -+ - if (mmap_is_legacy()) { - mm->mmap_base = mm->mmap_legacy_base; - mm->get_unmapped_area = arch_get_unmapped_area; -diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c -index 47ee620..1107387 100644 ---- a/arch/parisc/kernel/traps.c -+++ b/arch/parisc/kernel/traps.c -@@ -726,9 +726,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) - - down_read(&current->mm->mmap_sem); - vma = find_vma(current->mm,regs->iaoq[0]); -- if (vma && (regs->iaoq[0] >= vma->vm_start) -- && (vma->vm_flags & VM_EXEC)) { -- -+ if (vma && (regs->iaoq[0] >= vma->vm_start)) { - fault_address = regs->iaoq[0]; - fault_space = regs->iasq[0]; - -diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c -index d27e388..addd2dc 100644 ---- a/arch/parisc/mm/fault.c -+++ b/arch/parisc/mm/fault.c -@@ -15,6 +15,7 @@ - #include <linux/sched.h> - #include <linux/interrupt.h> - #include <linux/module.h> -+#include <linux/unistd.h> - - #include <asm/uaccess.h> - #include <asm/traps.h> -@@ -50,7 +51,7 @@ int show_unhandled_signals = 1; - static unsigned long - parisc_acctyp(unsigned long code, unsigned int inst) - { -- if (code == 6 || code == 16) -+ if (code == 6 || code == 7 || code == 16) - return VM_EXEC; - - switch (inst & 0xf0000000) { -@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst) - } - #endif - -+#ifdef CONFIG_PAX_PAGEEXEC -+/* -+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when rt_sigreturn trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+
do { /* PaX: unpatched PLT emulation */ -+ unsigned int bl, depwi; -+ -+ err = get_user(bl, (unsigned int *)instruction_pointer(regs)); -+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); -+ -+ if (err) -+ break; -+ -+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { -+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; -+ -+ err = get_user(ldw, (unsigned int *)addr); -+ err |= get_user(bv, (unsigned int *)(addr+4)); -+ err |= get_user(ldw2, (unsigned int *)(addr+8)); -+ -+ if (err) -+ break; -+ -+ if (ldw == 0x0E801096U && -+ bv == 0xEAC0C000U && -+ ldw2 == 0x0E881095U) -+ { -+ unsigned int resolver, map; -+ -+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); -+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); -+ if (err) -+ break; -+ -+ regs->gr[20] = instruction_pointer(regs)+8; -+ regs->gr[21] = map; -+ regs->gr[22] = resolver; -+ regs->iaoq[0] = resolver | 3UL; -+ regs->iaoq[1] = regs->iaoq[0] + 4; -+ return 3; -+ } -+ } -+ } while (0); -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ -+#ifndef CONFIG_PAX_EMUSIGRT -+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) -+ return 1; -+#endif -+ -+ do { /* PaX: rt_sigreturn emulation */ -+ unsigned int ldi1, ldi2, bel, nop; -+ -+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); -+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); -+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); -+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); -+ -+ if (err) -+ break; -+ -+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && -+ ldi2 == 0x3414015AU && -+ bel == 0xE4008200U && -+ nop == 0x08000240U) -+ { -+ regs->gr[25] = (ldi1 & 2) >> 1; -+ regs->gr[20] = __NR_rt_sigreturn; -+ regs->gr[31] = regs->iaoq[1] + 16; -+ regs->sr[0] = regs->iasq[1]; -+ regs->iaoq[0] = 0x100UL; -+ regs->iaoq[1] = regs->iaoq[0] + 4; -+ regs->iasq[0] = regs->sr[2]; -+ regs->iasq[1] = regs->sr[2]; -+ return 2; -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 5; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? 
"); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - int fixup_exception(struct pt_regs *regs) - { - const struct exception_table_entry *fix; -@@ -234,8 +345,33 @@ retry: - - good_area: - -- if ((vma->vm_flags & acc_type) != acc_type) -+ if ((vma->vm_flags & acc_type) != acc_type) { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && -+ (address & ~3UL) == instruction_pointer(regs)) -+ { -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 3: -+ return; -+#endif -+ -+#ifdef CONFIG_PAX_EMUTRAMP -+ case 2: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - goto bad_area; -+ } - - /* - * If for any reason at all we couldn't handle the fault, make -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index ee3c660..afa4212 100644 ---- a/arch/powerpc/Kconfig -+++ b/arch/powerpc/Kconfig -@@ -394,6 +394,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE - config KEXEC - bool "kexec system call" - depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) -+ depends on !GRKERNSEC_KMEM - help - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot -diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h -index e3b1d41..8e81edf 100644 ---- a/arch/powerpc/include/asm/atomic.h -+++ b/arch/powerpc/include/asm/atomic.h -@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v) - return t1; - } - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - #endif /* __powerpc64__ */ - - #endif /* __KERNEL__ */ -diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h -index f89da80..7f5b05a 100644 ---- a/arch/powerpc/include/asm/barrier.h -+++ b/arch/powerpc/include/asm/barrier.h -@@ -73,7 +73,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - __lwsync(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h -index ed0afc1..0332825 100644 ---- a/arch/powerpc/include/asm/cache.h -+++ b/arch/powerpc/include/asm/cache.h -@@ -3,6 +3,7 @@ - - #ifdef __KERNEL__ - -+#include <linux/const.h> - - /* bytes per L1 cache line */ - #if defined(CONFIG_8xx) || defined(CONFIG_403GCX) -@@ -22,7 +23,7 @@ - #define L1_CACHE_SHIFT 7 - #endif - --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define SMP_CACHE_BYTES L1_CACHE_BYTES - -diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h -index 935b5e7..7001d2d 100644 ---- a/arch/powerpc/include/asm/elf.h -+++ b/arch/powerpc/include/asm/elf.h -@@ -28,8 +28,19 @@ - the loader. 
We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - --extern unsigned long randomize_et_dyn(unsigned long base); --#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) -+#define ELF_ET_DYN_BASE (0x20000000) -+ -+#ifdef CONFIG_PAX_ASLR -+#define PAX_ELF_ET_DYN_BASE (0x10000000UL) -+ -+#ifdef __powerpc64__ -+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28) -+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28) -+#else -+#define PAX_DELTA_MMAP_LEN 15 -+#define PAX_DELTA_STACK_LEN 15 -+#endif -+#endif - - #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) - -@@ -127,10 +138,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, - (0x7ff >> (PAGE_SHIFT - 12)) : \ - (0x3ffff >> (PAGE_SHIFT - 12))) - --extern unsigned long arch_randomize_brk(struct mm_struct *mm); --#define arch_randomize_brk arch_randomize_brk -- -- - #ifdef CONFIG_SPU_BASE - /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */ - #define NT_SPU 1 -diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h -index 8196e9c..d83a9f3 100644 ---- a/arch/powerpc/include/asm/exec.h -+++ b/arch/powerpc/include/asm/exec.h -@@ -4,6 +4,6 @@ - #ifndef _ASM_POWERPC_EXEC_H - #define _ASM_POWERPC_EXEC_H - --extern unsigned long arch_align_stack(unsigned long sp); -+#define arch_align_stack(x) ((x) & ~0xfUL) - - #endif /* _ASM_POWERPC_EXEC_H */ -diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h -index 5acabbd..7ea14fa 100644 ---- a/arch/powerpc/include/asm/kmap_types.h -+++ b/arch/powerpc/include/asm/kmap_types.h -@@ -10,7 +10,7 @@ - * 2 of the License, or (at your option) any later version. - */ - --#define KM_TYPE_NR 16 -+#define KM_TYPE_NR 17 - - #endif /* __KERNEL__ */ - #endif /* _ASM_POWERPC_KMAP_TYPES_H */ -diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h -index b8da913..60b608a 100644 ---- a/arch/powerpc/include/asm/local.h -+++ b/arch/powerpc/include/asm/local.h -@@ -9,15 +9,26 @@ typedef struct - atomic_long_t a; - } local_t; - -+typedef struct -+{ -+ atomic_long_unchecked_t a; -+} local_unchecked_t; -+ - #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } - - #define local_read(l) atomic_long_read(&(l)->a) -+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a) - #define local_set(l,i) atomic_long_set(&(l)->a, (i)) -+#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i)) - - #define local_add(i,l) atomic_long_add((i),(&(l)->a)) -+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a)) - #define local_sub(i,l) atomic_long_sub((i),(&(l)->a)) -+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a)) - #define local_inc(l) atomic_long_inc(&(l)->a) -+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a) - #define local_dec(l) atomic_long_dec(&(l)->a) -+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a) - - static __inline__ long local_add_return(long a, local_t *l) - { -@@ -35,6 +46,7 @@ static __inline__ long local_add_return(long a, local_t *l) - - return t; - } -+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a)) - - #define local_add_negative(a, l) (local_add_return((a), (l)) < 0) - -@@ -54,6 +66,7 @@ static __inline__ long local_sub_return(long a, local_t *l) - - return t; - } -+#define local_sub_return_unchecked(i, l) atomic_long_sub_return_unchecked((i), (&(l)->a)) - - static __inline__ long 
local_inc_return(local_t *l) - { -@@ -101,6 +114,8 @@ static __inline__ long local_dec_return(local_t *l) - - #define local_cmpxchg(l, o, n) \ - (cmpxchg_local(&((l)->a.counter), (o), (n))) -+#define local_cmpxchg_unchecked(l, o, n) \ -+ (cmpxchg_local(&((l)->a.counter), (o), (n))) - #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n))) - - /** -diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h -index 8565c25..2865190 100644 ---- a/arch/powerpc/include/asm/mman.h -+++ b/arch/powerpc/include/asm/mman.h -@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot) - } - #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot) - --static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) -+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags) - { - return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); - } -diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h -index 32e4e21..62afb12 100644 ---- a/arch/powerpc/include/asm/page.h -+++ b/arch/powerpc/include/asm/page.h -@@ -230,8 +230,9 @@ extern long long virt_phys_offset; - * and needs to be executable. This means the whole heap ends - * up being executable. - */ --#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ -- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -+#define VM_DATA_DEFAULT_FLAGS32 \ -+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ -+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -@@ -259,6 +260,9 @@ extern long long virt_phys_offset; - #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) - #endif - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - #ifndef CONFIG_PPC_BOOK3S_64 - /* - * Use the top bit of the higher-level page table entries to indicate whether -diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h -index 88693ce..ac6f9ab 100644 ---- a/arch/powerpc/include/asm/page_64.h -+++ b/arch/powerpc/include/asm/page_64.h -@@ -153,15 +153,18 @@ do { \ - * stack by default, so in the absence of a PT_GNU_STACK program header - * we turn execute permission off. - */ --#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ -- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -+#define VM_STACK_DEFAULT_FLAGS32 \ -+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ -+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - - #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -+#ifndef CONFIG_PAX_PAGEEXEC - #define VM_STACK_DEFAULT_FLAGS \ - (is_32bit_task() ? 
\ - VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) -+#endif - - #include <asm-generic/getorder.h> - -diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h -index 4b0be20..c15a27d 100644 ---- a/arch/powerpc/include/asm/pgalloc-64.h -+++ b/arch/powerpc/include/asm/pgalloc-64.h -@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) - #ifndef CONFIG_PPC_64K_PAGES - - #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) -+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD)) - - static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) - { -@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) - pud_set(pud, (unsigned long)pmd); - } - -+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) -+{ -+ pud_populate(mm, pud, pmd); -+} -+ - #define pmd_populate(mm, pmd, pte_page) \ - pmd_populate_kernel(mm, pmd, page_address(pte_page)) - #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) -@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table); - #endif - - #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd) -+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) - - static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, - pte_t *pte) -diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h -index 3ebb188..e17dddf 100644 ---- a/arch/powerpc/include/asm/pgtable.h -+++ b/arch/powerpc/include/asm/pgtable.h -@@ -2,6 +2,7 @@ - #define _ASM_POWERPC_PGTABLE_H - #ifdef __KERNEL__ - -+#include <linux/const.h> - #ifndef __ASSEMBLY__ - #include <linux/mmdebug.h> - #include <asm/processor.h> /* For TASK_SIZE */ -diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h -index 4aad413..85d86bf 100644 ---- a/arch/powerpc/include/asm/pte-hash32.h -+++ b/arch/powerpc/include/asm/pte-hash32.h -@@ -21,6 +21,7 @@ - #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ - #define _PAGE_USER 0x004 /* usermode access allowed */ - #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ -+#define _PAGE_EXEC _PAGE_GUARDED - #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ - #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ - #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ -diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h -index ce17815..c5574cc 100644 ---- a/arch/powerpc/include/asm/reg.h -+++ b/arch/powerpc/include/asm/reg.h -@@ -249,6 +249,7 @@ - #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ - #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ - #define DSISR_NOHPTE 0x40000000 /* no translation found */ -+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ - #define DSISR_PROTFAULT 0x08000000 /* protection fault */ - #define DSISR_ISSTORE 0x02000000 /* access was a store */ - #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ -diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h -index 084e080..9415a3d 100644 ---- a/arch/powerpc/include/asm/smp.h -+++ b/arch/powerpc/include/asm/smp.h -@@ -51,7 +51,7 @@ struct smp_ops_t { - int (*cpu_disable)(void); - void (*cpu_die)(unsigned int nr); - int (*cpu_bootable)(unsigned int nr); --}; -+} __no_const; - - extern void smp_send_debugger_break(void); - extern void 
start_secondary_resume(void); -diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h -index b034ecd..af7e31f 100644 ---- a/arch/powerpc/include/asm/thread_info.h -+++ b/arch/powerpc/include/asm/thread_info.h -@@ -107,6 +107,8 @@ static inline struct thread_info *current_thread_info(void) - #if defined(CONFIG_PPC64) - #define TIF_ELF2ABI 18 /* function descriptors must die! */ - #endif -+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */ -+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -@@ -125,9 +127,10 @@ static inline struct thread_info *current_thread_info(void) - #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) - #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) - #define _TIF_NOHZ (1<<TIF_NOHZ) -+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID) - #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ -- _TIF_NOHZ) -+ _TIF_NOHZ | _TIF_GRSEC_SETXID) - - #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h -index 9485b43..3bd3c16 100644 ---- a/arch/powerpc/include/asm/uaccess.h -+++ b/arch/powerpc/include/asm/uaccess.h -@@ -58,6 +58,7 @@ - - #endif - -+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size)) - #define access_ok(type, addr, size) \ - (__chk_user_ptr(addr), \ - __access_ok((__force unsigned long)(addr), (size), get_fs())) -@@ -318,52 +319,6 @@ do { \ - extern unsigned long __copy_tofrom_user(void __user *to, - const void __user *from, unsigned long size); - --#ifndef __powerpc64__ -- --static inline unsigned long copy_from_user(void *to, -- const void __user *from, unsigned long n) --{ -- unsigned long over; -- -- if (access_ok(VERIFY_READ, from, n)) -- return __copy_tofrom_user((__force void __user *)to, from, n); -- if ((unsigned long)from < TASK_SIZE) { -- over = (unsigned long)from + n - TASK_SIZE; -- return __copy_tofrom_user((__force void __user *)to, from, -- n - over) + over; -- } -- return n; --} -- --static inline unsigned long copy_to_user(void __user *to, -- const void *from, unsigned long n) --{ -- unsigned long over; -- -- if (access_ok(VERIFY_WRITE, to, n)) -- return __copy_tofrom_user(to, (__force void __user *)from, n); -- if ((unsigned long)to < TASK_SIZE) { -- over = (unsigned long)to + n - TASK_SIZE; -- return __copy_tofrom_user(to, (__force void __user *)from, -- n - over) + over; -- } -- return n; --} -- --#else /* __powerpc64__ */ -- --#define __copy_in_user(to, from, size) \ -- __copy_tofrom_user((to), (from), (size)) -- --extern unsigned long copy_from_user(void *to, const void __user *from, -- unsigned long n); --extern unsigned long copy_to_user(void __user *to, const void *from, -- unsigned long n); --extern unsigned long copy_in_user(void __user *to, const void __user *from, -- unsigned long n); -- --#endif /* __powerpc64__ */ -- - static inline unsigned long __copy_from_user_inatomic(void *to, - const void __user *from, unsigned long n) - { -@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, - if (ret == 0) - return 0; - } -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ - return __copy_tofrom_user((__force void __user *)to, from, n); - } - -@@ -413,6 
+372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, - if (ret == 0) - return 0; - } -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ - return __copy_tofrom_user(to, (__force const void __user *)from, n); - } - -@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to, - return __copy_to_user_inatomic(to, from, size); - } - -+#ifndef __powerpc64__ -+ -+static inline unsigned long __must_check copy_from_user(void *to, -+ const void __user *from, unsigned long n) -+{ -+ unsigned long over; -+ -+ if ((long)n < 0) -+ return n; -+ -+ if (access_ok(VERIFY_READ, from, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ return __copy_tofrom_user((__force void __user *)to, from, n); -+ } -+ if ((unsigned long)from < TASK_SIZE) { -+ over = (unsigned long)from + n - TASK_SIZE; -+ if (!__builtin_constant_p(n - over)) -+ check_object_size(to, n - over, false); -+ return __copy_tofrom_user((__force void __user *)to, from, -+ n - over) + over; -+ } -+ return n; -+} -+ -+static inline unsigned long __must_check copy_to_user(void __user *to, -+ const void *from, unsigned long n) -+{ -+ unsigned long over; -+ -+ if ((long)n < 0) -+ return n; -+ -+ if (access_ok(VERIFY_WRITE, to, n)) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ return __copy_tofrom_user(to, (__force void __user *)from, n); -+ } -+ if ((unsigned long)to < TASK_SIZE) { -+ over = (unsigned long)to + n - TASK_SIZE; -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n - over, true); -+ return __copy_tofrom_user(to, (__force void __user *)from, -+ n - over) + over; -+ } -+ return n; -+} -+ -+#else /* __powerpc64__ */ -+ -+#define __copy_in_user(to, from, size) \ -+ __copy_tofrom_user((to), (from), (size)) -+ -+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) -+{ -+ if ((long)n < 0 || n > INT_MAX) -+ return n; -+ -+ if (!__builtin_constant_p(n)) -+ check_object_size(to, n, false); -+ -+ if (likely(access_ok(VERIFY_READ, from, n))) -+ n = __copy_from_user(to, from, n); -+ else -+ memset(to, 0, n); -+ return n; -+} -+ -+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) -+{ -+ if ((long)n < 0 || n > INT_MAX) -+ return n; -+ -+ if (likely(access_ok(VERIFY_WRITE, to, n))) { -+ if (!__builtin_constant_p(n)) -+ check_object_size(from, n, true); -+ n = __copy_to_user(to, from, n); -+ } -+ return n; -+} -+ -+extern unsigned long copy_in_user(void __user *to, const void __user *from, -+ unsigned long n); -+ -+#endif /* __powerpc64__ */ -+ - extern unsigned long __clear_user(void __user *addr, unsigned long size); - - static inline unsigned long clear_user(void __user *addr, unsigned long size) -diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile -index fcc9a89..10f8e7e 100644 ---- a/arch/powerpc/kernel/Makefile -+++ b/arch/powerpc/kernel/Makefile -@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC - CFLAGS_btext.o += -fPIC - endif - -+CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS) -+CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS) -+CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS) -+CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS) -+ - ifdef CONFIG_FUNCTION_TRACER - # Do not trace early boot code - CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog -@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog - CFLAGS_REMOVE_time.o = -pg 
-mno-sched-epilog - endif - -+CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS) -+ - obj-y := cputable.o ptrace.o syscalls.o \ - irq.o align.o signal_32.o pmc.o vdso.o \ - process.o systbl.o idle.o \ -diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S -index 063b65d..7a26e9d 100644 ---- a/arch/powerpc/kernel/exceptions-64e.S -+++ b/arch/powerpc/kernel/exceptions-64e.S -@@ -771,6 +771,7 @@ storage_fault_common: - std r14,_DAR(r1) - std r15,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+ bl .save_nvgprs - mr r4,r14 - mr r5,r15 - ld r14,PACA_EXGEN+EX_R14(r13) -@@ -779,8 +780,7 @@ storage_fault_common: - cmpdi r3,0 - bne- 1f - b .ret_from_except_lite --1: bl .save_nvgprs -- mr r5,r3 -+1: mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - ld r4,_DAR(r1) - bl .bad_page_fault -diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S -index 5193116..1fed658 100644 ---- a/arch/powerpc/kernel/exceptions-64s.S -+++ b/arch/powerpc/kernel/exceptions-64s.S -@@ -1584,10 +1584,10 @@ handle_page_fault: - 11: ld r4,_DAR(r1) - ld r5,_DSISR(r1) - addi r3,r1,STACK_FRAME_OVERHEAD -+ bl .save_nvgprs - bl .do_page_fault - cmpdi r3,0 - beq+ 12f -- bl .save_nvgprs - mr r5,r3 - addi r3,r1,STACK_FRAME_OVERHEAD - lwz r4,_DAR(r1) -diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c -index 1d0848b..d74685f 100644 ---- a/arch/powerpc/kernel/irq.c -+++ b/arch/powerpc/kernel/irq.c -@@ -447,6 +447,8 @@ void migrate_irqs(void) - } - #endif - -+extern void gr_handle_kernel_exploit(void); -+ - static inline void check_stack_overflow(void) - { - #ifdef CONFIG_DEBUG_STACKOVERFLOW -@@ -459,6 +461,7 @@ static inline void check_stack_overflow(void) - printk("do_IRQ: stack overflow: %ld\n", - sp - sizeof(struct thread_info)); - dump_stack(); -+ gr_handle_kernel_exploit(); - } - #endif - } -diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c -index 6cff040..74ac5d1b 100644 ---- a/arch/powerpc/kernel/module_32.c -+++ b/arch/powerpc/kernel/module_32.c -@@ -161,7 +161,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr, - me->arch.core_plt_section = i; - } - if (!me->arch.core_plt_section || !me->arch.init_plt_section) { -- printk("Module doesn't contain .plt or .init.plt sections.\n"); -+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); - return -ENOEXEC; - } - -@@ -191,11 +191,16 @@ static uint32_t do_plt_call(void *location, - - DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); - /* Init, or core PLT? */ -- if (location >= mod->module_core -- && location < mod->module_core + mod->core_size) -+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || -+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) - entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; -- else -+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || -+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) - entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; -+ else { -+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); -+ return ~0UL; -+ } - - /* Find this entry, or if that fails, the next avail. 
entry */ - while (entry->jump[0]) { -@@ -299,7 +304,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, - } - #ifdef CONFIG_DYNAMIC_FTRACE - module->arch.tramp = -- do_plt_call(module->module_core, -+ do_plt_call(module->module_core_rx, - (unsigned long)ftrace_caller, - sechdrs, module); - #endif -diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c -index 31d0215..206af70 100644 ---- a/arch/powerpc/kernel/process.c -+++ b/arch/powerpc/kernel/process.c -@@ -1031,8 +1031,8 @@ void show_regs(struct pt_regs * regs) - * Lookup NIP late so we have the best change of getting the - * above info out without failing - */ -- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); -- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); -+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip); -+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link); - #endif - show_stack(current, (unsigned long *) regs->gpr[1]); - if (!user_mode(regs)) -@@ -1554,10 +1554,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) - newsp = stack[0]; - ip = stack[STACK_FRAME_LR_SAVE]; - if (!firstframe || ip != lr) { -- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); -+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip); - #ifdef CONFIG_FUNCTION_GRAPH_TRACER - if ((ip == rth || ip == mrth) && curr_frame >= 0) { -- printk(" (%pS)", -+ printk(" (%pA)", - (void *)current->ret_stack[curr_frame].ret); - curr_frame--; - } -@@ -1577,7 +1577,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) - struct pt_regs *regs = (struct pt_regs *) - (sp + STACK_FRAME_OVERHEAD); - lr = regs->link; -- printk("--- Exception: %lx at %pS\n LR = %pS\n", -+ printk("--- Exception: %lx at %pA\n LR = %pA\n", - regs->trap, (void *)regs->nip, (void *)lr); - firstframe = 1; - } -@@ -1613,58 +1613,3 @@ void notrace __ppc64_runlatch_off(void) - mtspr(SPRN_CTRLT, ctrl); - } - #endif /* CONFIG_PPC64 */ -- --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() & ~PAGE_MASK; -- return sp & ~0xf; --} -- --static inline unsigned long brk_rnd(void) --{ -- unsigned long rnd = 0; -- -- /* 8MB for 32bit, 1GB for 64bit */ -- if (is_32bit_task()) -- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); -- else -- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); -- -- return rnd << PAGE_SHIFT; --} -- --unsigned long arch_randomize_brk(struct mm_struct *mm) --{ -- unsigned long base = mm->brk; -- unsigned long ret; -- --#ifdef CONFIG_PPC_STD_MMU_64 -- /* -- * If we are using 1TB segments and we are allowed to randomise -- * the heap, we can put it above 1TB so it is backed by a 1TB -- * segment. Otherwise the heap will be in the bottom 1TB -- * which always uses 256MB segments and this may result in a -- * performance penalty. 
-- */ -- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) -- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); --#endif -- -- ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < mm->brk) -- return mm->brk; -- -- return ret; --} -- --unsigned long randomize_et_dyn(unsigned long base) --{ -- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); -- -- if (ret < base) -- return base; -- -- return ret; --} -diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c -index 2e3d2bf..35df241 100644 ---- a/arch/powerpc/kernel/ptrace.c -+++ b/arch/powerpc/kernel/ptrace.c -@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request, - return ret; - } - -+#ifdef CONFIG_GRKERNSEC_SETXID -+extern void gr_delayed_cred_worker(void); -+#endif -+ - /* - * We must return the syscall number to actually look up in the table. - * This can be -1L to skip running any syscall at all. -@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs) - - secure_computing_strict(regs->gpr[0]); - -+#ifdef CONFIG_GRKERNSEC_SETXID -+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) -+ gr_delayed_cred_worker(); -+#endif -+ - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) - /* -@@ -1808,6 +1817,11 @@ void do_syscall_trace_leave(struct pt_regs *regs) - { - int step; - -+#ifdef CONFIG_GRKERNSEC_SETXID -+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID))) -+ gr_delayed_cred_worker(); -+#endif -+ - audit_syscall_exit(regs); - - if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) -diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c -index e881e3f..0fed4bce 100644 ---- a/arch/powerpc/kernel/signal_32.c -+++ b/arch/powerpc/kernel/signal_32.c -@@ -1011,7 +1011,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, - /* Save user registers on the stack */ - frame = &rt_sf->uc.uc_mcontext; - addr = frame; -- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { -+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { - sigret = 0; - tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; - } else { -diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c -index d501dc4..e5a0de0 100644 ---- a/arch/powerpc/kernel/signal_64.c -+++ b/arch/powerpc/kernel/signal_64.c -@@ -760,7 +760,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, - current->thread.fp_state.fpscr = 0; - - /* Set up to return from userspace. 
*/ -- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { -+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { - regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; - } else { - err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); -diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c -index 33cd7a0..d615344 100644 ---- a/arch/powerpc/kernel/traps.c -+++ b/arch/powerpc/kernel/traps.c -@@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs) - return flags; - } - -+extern void gr_handle_kernel_exploit(void); -+ - static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, - int signr) - { -@@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, - panic("Fatal exception in interrupt"); - if (panic_on_oops) - panic("Fatal exception"); -+ -+ gr_handle_kernel_exploit(); -+ - do_exit(signr); - } - -diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c -index 094e45c..d82b848 100644 ---- a/arch/powerpc/kernel/vdso.c -+++ b/arch/powerpc/kernel/vdso.c -@@ -35,6 +35,7 @@ - #include <asm/vdso.h> - #include <asm/vdso_datapage.h> - #include <asm/setup.h> -+#include <asm/mman.h> - - #undef DEBUG - -@@ -221,7 +222,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - vdso_base = VDSO32_MBASE; - #endif - -- current->mm->context.vdso_base = 0; -+ current->mm->context.vdso_base = ~0UL; - - /* vDSO has a problem and was disabled, just don't "enable" it for the - * process -@@ -241,7 +242,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) - vdso_base = get_unmapped_area(NULL, vdso_base, - (vdso_pages << PAGE_SHIFT) + - ((VDSO_ALIGNMENT - 1) & PAGE_MASK), -- 0, 0); -+ 0, MAP_PRIVATE | MAP_EXECUTABLE); - if (IS_ERR_VALUE(vdso_base)) { - rc = vdso_base; - goto fail_mmapsem; -diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c -index 3cf541a..ab2d825 100644 ---- a/arch/powerpc/kvm/powerpc.c -+++ b/arch/powerpc/kvm/powerpc.c -@@ -1153,7 +1153,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param) - } - EXPORT_SYMBOL_GPL(kvmppc_init_lpid); - --int kvm_arch_init(void *opaque) -+int kvm_arch_init(const void *opaque) - { - return 0; - } -diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c -index 5eea6f3..5d10396 100644 ---- a/arch/powerpc/lib/usercopy_64.c -+++ b/arch/powerpc/lib/usercopy_64.c -@@ -9,22 +9,6 @@ - #include <linux/module.h> - #include <asm/uaccess.h> - --unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) --{ -- if (likely(access_ok(VERIFY_READ, from, n))) -- n = __copy_from_user(to, from, n); -- else -- memset(to, 0, n); -- return n; --} -- --unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) --{ -- if (likely(access_ok(VERIFY_WRITE, to, n))) -- n = __copy_to_user(to, from, n); -- return n; --} -- - unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n) - { -@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from, - return n; - } - --EXPORT_SYMBOL(copy_from_user); --EXPORT_SYMBOL(copy_to_user); - EXPORT_SYMBOL(copy_in_user); - -diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c -index 010fabf..e5c18a4 100644 ---- a/arch/powerpc/mm/fault.c -+++ b/arch/powerpc/mm/fault.c -@@ -33,6 +33,10 @@ - #include <linux/magic.h> - #include <linux/ratelimit.h> - #include <linux/context_tracking.h> -+#include <linux/slab.h> 
-+#include <linux/pagemap.h>
-+#include <linux/compiler.h>
-+#include <linux/unistd.h>
-
- #include <asm/firmware.h>
- #include <asm/page.h>
-@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
- }
- #endif
-
-+#ifdef CONFIG_PAX_PAGEEXEC
-+/*
-+ * PaX: decide what to do with offenders (regs->nip = fault address)
-+ *
-+ * returns 1 when task should be killed
-+ */
-+static int pax_handle_fetch_fault(struct pt_regs *regs)
-+{
-+ return 1;
-+}
-+
-+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
-+{
-+ unsigned long i;
-+
-+ printk(KERN_ERR "PAX: bytes at PC: ");
-+ for (i = 0; i < 5; i++) {
-+ unsigned int c;
-+ if (get_user(c, (unsigned int __user *)pc+i))
-+ printk(KERN_CONT "???????? ");
-+ else
-+ printk(KERN_CONT "%08x ", c);
-+ }
-+ printk("\n");
-+}
-+#endif
-+
- /*
- * Check whether the instruction at regs->nip is a store using
- * an update addressing form which will update r1.
-@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
- * indicate errors in DSISR but can validly be set in SRR1.
- */
- if (trap == 0x400)
-- error_code &= 0x48200000;
-+ error_code &= 0x58200000;
- else
- is_write = error_code & DSISR_ISSTORE;
- #else
-@@ -378,7 +409,7 @@ good_area:
- * "undefined". Of those that can be set, this is the only
- * one which seems bad.
- */
-- if (error_code & 0x10000000)
-+ if (error_code & DSISR_GUARDED)
- /* Guarded storage error. */
- goto bad_area;
- #endif /* CONFIG_8xx */
-@@ -393,7 +424,7 @@ good_area:
- * processors use the same I/D cache coherency mechanism
- * as embedded.
- */
-- if (error_code & DSISR_PROTFAULT)
-+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
- goto bad_area;
- #endif /* CONFIG_PPC_STD_MMU */
-
-@@ -485,6 +516,23 @@ bad_area:
- bad_area_nosemaphore:
- /* User mode accesses cause a SIGSEGV */
- if (user_mode(regs)) {
-+
-+#ifdef CONFIG_PAX_PAGEEXEC
-+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
-+#ifdef CONFIG_PPC_STD_MMU
-+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
-+#else
-+ if (is_exec && regs->nip == address) {
-+#endif
-+ switch (pax_handle_fetch_fault(regs)) {
-+ }
-+
-+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
-+ do_group_exit(SIGKILL);
-+ }
-+ }
-+#endif
-+
- _exception(SIGSEGV, regs, code, address);
- goto bail;
- }
-diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
-index cb8bdbe..cde4bc7 100644
---- a/arch/powerpc/mm/mmap.c
-+++ b/arch/powerpc/mm/mmap.c
-@@ -53,10 +53,14 @@ static inline int mmap_is_legacy(void)
- return sysctl_legacy_va_layout;
- }
-
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
-@@ -67,7 +71,7 @@ static unsigned long mmap_rnd(void)
- return rnd << PAGE_SHIFT;
- }
-
--static inline unsigned long mmap_base(void)
-+static inline unsigned long mmap_base(struct mm_struct *mm)
- {
- unsigned long gap = rlimit(RLIMIT_STACK);
-
-@@ -76,7 +80,7 @@ static inline unsigned long mmap_base(void)
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
-- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
-+ return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd(mm));
- }
-
- /*
-@@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
-- mm->mmap_base = mmap_base();
-+ mm->mmap_base = mmap_base(mm);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
-index b0c75cc..ef7fb93 100644
---- a/arch/powerpc/mm/slice.c
-+++ b/arch/powerpc/mm/slice.c
-@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
- if ((mm->task_size - len) < addr)
- return 0;
- vma = find_vma(mm, addr);
-- return (!vma || (addr + len) <= vma->vm_start);
-+ return check_heap_stack_gap(vma, addr, len, 0);
- }
-
- static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
-@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
- info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ addr += mm->delta_mmap;
-+#endif
-+
- while (addr < TASK_SIZE) {
- info.low_limit = addr;
- if (!slice_scan_available(addr, available, 1, &addr))
-@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
- if (fixed && addr > (mm->task_size - len))
- return -ENOMEM;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
-+ addr = 0;
-+#endif
-+
- /* If hint, make sure it matches our alignment restrictions */
- if (!fixed && addr) {
- addr = _ALIGN_UP(addr, 1ul << pshift);
-diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-index 4278acf..67fd0e6 100644
---- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
-@@ -400,8 +400,8 @@ static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- }
-
- static struct pci_ops scc_pciex_pci_ops = {
-- scc_pciex_read_config,
-- scc_pciex_write_config,
-+ .read = scc_pciex_read_config,
-+ .write = scc_pciex_write_config,
- };
-
- static void pciex_clear_intr_all(unsigned int __iomem *base)
-diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
-index 9098692..3d54cd1 100644
---- a/arch/powerpc/platforms/cell/spufs/file.c
-+++ b/arch/powerpc/platforms/cell/spufs/file.c
-@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
- return VM_FAULT_NOPAGE;
- }
-
--static int spufs_mem_mmap_access(struct vm_area_struct *vma,
-+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
- unsigned long address,
-- void *buf, int len, int write)
-+ void *buf, size_t len, int write)
- {
- struct spu_context *ctx = vma->vm_file->private_data;
- unsigned long offset = address - vma->vm_start;
-diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
-index 1d47061..0714963 100644
---- a/arch/s390/include/asm/atomic.h
-+++ b/arch/s390/include/asm/atomic.h
-@@ -412,6 +412,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
- #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-+#define atomic64_read_unchecked(v) atomic64_read(v)
-+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
-+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
-+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
-+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
-+#define atomic64_inc_unchecked(v) atomic64_inc(v)
-+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
-+#define atomic64_dec_unchecked(v) atomic64_dec(v)
-+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
-+
- #define smp_mb__before_atomic_dec() smp_mb()
- #define smp_mb__after_atomic_dec() smp_mb()
- #define smp_mb__before_atomic_inc() smp_mb()
-diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
-index 578680f..0eb3b11 100644
---- a/arch/s390/include/asm/barrier.h
-+++ b/arch/s390/include/asm/barrier.h
-@@ -36,7 +36,7 @@
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
-index 4d7ccac..d03d0ad 100644
---- a/arch/s390/include/asm/cache.h
-+++ b/arch/s390/include/asm/cache.h
-@@ -9,8 +9,10 @@
- #ifndef __ARCH_S390_CACHE_H
- #define __ARCH_S390_CACHE_H
-
--#define L1_CACHE_BYTES 256
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 8
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
- #define NET_SKB_PAD 32
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
-index 78f4f87..598ce39 100644
---- a/arch/s390/include/asm/elf.h
-+++ b/arch/s390/include/asm/elf.h
-@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
--extern unsigned long randomize_et_dyn(unsigned long base);
--#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
-+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
-+
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
-+#endif
-
- /* This yields a mask that user programs can use to figure out what
- instruction set this CPU supports. */
-@@ -222,9 +228,6 @@ struct linux_binprm;
- #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
- int arch_setup_additional_pages(struct linux_binprm *, int);
-
--extern unsigned long arch_randomize_brk(struct mm_struct *mm);
--#define arch_randomize_brk arch_randomize_brk
--
- void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
-
- #endif
-diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
-index c4a93d6..4d2a9b4 100644
---- a/arch/s390/include/asm/exec.h
-+++ b/arch/s390/include/asm/exec.h
-@@ -7,6 +7,6 @@
- #ifndef __ASM_EXEC_H
- #define __ASM_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) ((x) & ~0xfUL)
-
- #endif /* __ASM_EXEC_H */
-diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
-index 79330af..254cf37 100644
---- a/arch/s390/include/asm/uaccess.h
-+++ b/arch/s390/include/asm/uaccess.h
-@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
- __range_ok((unsigned long)(addr), (size)); \
- })
-
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) __access_ok(addr, size)
-
- /*
-@@ -245,6 +246,10 @@ static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long n)
- {
- might_fault();
-+
-+ if ((long)n < 0)
-+ return n;
-+
- return __copy_to_user(to, from, n);
- }
-
-@@ -268,6 +273,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
- static inline unsigned long __must_check
- __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- return uaccess.copy_from_user(n, from, to);
- }
-
-@@ -296,10 +304,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- unsigned int sz = __compiletime_object_size(to);
-+ size_t sz = __compiletime_object_size(to);
-
- might_fault();
-- if (unlikely(sz != -1 && sz < n)) {
-+
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (unlikely(sz != (size_t)-1 && sz < n)) {
- copy_from_user_overflow();
- return n;
- }
-diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
-index b89b591..fd9609d 100644
---- a/arch/s390/kernel/module.c
-+++ b/arch/s390/kernel/module.c
-@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
-
- /* Increase core size by size of got & plt and set start
- offsets for got and plt. */
-- me->core_size = ALIGN(me->core_size, 4);
-- me->arch.got_offset = me->core_size;
-- me->core_size += me->arch.got_size;
-- me->arch.plt_offset = me->core_size;
-- me->core_size += me->arch.plt_size;
-+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
-+ me->arch.got_offset = me->core_size_rw;
-+ me->core_size_rw += me->arch.got_size;
-+ me->arch.plt_offset = me->core_size_rx;
-+ me->core_size_rx += me->arch.plt_size;
- return 0;
- }
-
-@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- if (info->got_initialized == 0) {
- Elf_Addr *gotent;
-
-- gotent = me->module_core + me->arch.got_offset +
-+ gotent = me->module_core_rw + me->arch.got_offset +
- info->got_offset;
- *gotent = val;
- info->got_initialized = 1;
-@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- rc = apply_rela_bits(loc, val, 0, 64, 0);
- else if (r_type == R_390_GOTENT ||
- r_type == R_390_GOTPLTENT) {
-- val += (Elf_Addr) me->module_core - loc;
-+ val += (Elf_Addr) me->module_core_rw - loc;
- rc = apply_rela_bits(loc, val, 1, 32, 1);
- }
- break;
-@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
- if (info->plt_initialized == 0) {
- unsigned int *ip;
-- ip = me->module_core + me->arch.plt_offset +
-+ ip = me->module_core_rx + me->arch.plt_offset +
- info->plt_offset;
- #ifndef CONFIG_64BIT
- ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
-@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- val - loc + 0xffffUL < 0x1ffffeUL) ||
- (r_type == R_390_PLT32DBL &&
- val - loc + 0xffffffffULL < 0x1fffffffeULL)))
-- val = (Elf_Addr) me->module_core +
-+ val = (Elf_Addr) me->module_core_rx +
- me->arch.plt_offset +
- info->plt_offset;
- val += rela->r_addend - loc;
-@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- case R_390_GOTOFF32: /* 32 bit offset to GOT. */
- case R_390_GOTOFF64: /* 64 bit offset to GOT. */
- val = val + rela->r_addend -
-- ((Elf_Addr) me->module_core + me->arch.got_offset);
-+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
- if (r_type == R_390_GOTOFF16)
- rc = apply_rela_bits(loc, val, 0, 16, 0);
- else if (r_type == R_390_GOTOFF32)
-@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
- break;
- case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
- case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
-- val = (Elf_Addr) me->module_core + me->arch.got_offset +
-+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
- rela->r_addend - loc;
- if (r_type == R_390_GOTPC)
- rc = apply_rela_bits(loc, val, 1, 32, 0);
-diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
-index dd14532..1dfc145 100644
---- a/arch/s390/kernel/process.c
-+++ b/arch/s390/kernel/process.c
-@@ -242,37 +242,3 @@ unsigned long get_wchan(struct task_struct *p)
- }
- return 0;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
-- sp -= get_random_int() & ~PAGE_MASK;
-- return sp & ~0xf;
--}
--
--static inline unsigned long brk_rnd(void)
--{
-- /* 8MB for 32bit, 1GB for 64bit */
-- if (is_32bit_task())
-- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-- else
-- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
--}
--
--unsigned long arch_randomize_brk(struct mm_struct *mm)
--{
-- unsigned long ret;
--
-- ret = PAGE_ALIGN(mm->brk + brk_rnd());
-- return (ret > mm->brk) ? ret : mm->brk;
--}
--
--unsigned long randomize_et_dyn(unsigned long base)
--{
-- unsigned long ret;
--
-- if (!(current->flags & PF_RANDOMIZE))
-- return base;
-- ret = PAGE_ALIGN(base + brk_rnd());
-- return (ret > base) ? ret : base;
--}
-diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
-index 9b436c2..5c64ae8 100644
---- a/arch/s390/mm/mmap.c
-+++ b/arch/s390/mm/mmap.c
-@@ -58,6 +58,12 @@ static inline int mmap_is_legacy(void)
-
- static unsigned long mmap_rnd(void)
- {
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
-+ return 0;
-+#endif
-+
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
- /* 8MB randomization for mmap_base */
-@@ -95,9 +101,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-@@ -170,9 +188,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- */
- if (mmap_is_legacy()) {
- mm->mmap_base = mmap_base_legacy();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base();
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = s390_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
-index ae3d59f..f65f075 100644
---- a/arch/score/include/asm/cache.h
-+++ b/arch/score/include/asm/cache.h
-@@ -1,7 +1,9 @@
- #ifndef _ASM_SCORE_CACHE_H
- #define _ASM_SCORE_CACHE_H
-
-+#include <linux/const.h>
-+
- #define L1_CACHE_SHIFT 4
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #endif /* _ASM_SCORE_CACHE_H */
-diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
-index f9f3cd5..58ff438 100644
---- a/arch/score/include/asm/exec.h
-+++ b/arch/score/include/asm/exec.h
-@@ -1,6 +1,6 @@
- #ifndef _ASM_SCORE_EXEC_H
- #define _ASM_SCORE_EXEC_H
-
--extern unsigned long arch_align_stack(unsigned long sp);
-+#define arch_align_stack(x) (x)
-
- #endif /* _ASM_SCORE_EXEC_H */
-diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
-index a1519ad3..e8ac1ff 100644
---- a/arch/score/kernel/process.c
-+++ b/arch/score/kernel/process.c
-@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
-
- return task_pt_regs(task)->cp0_epc;
- }
--
--unsigned long arch_align_stack(unsigned long sp)
--{
-- return sp;
--}
-diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
-index ef9e555..331bd29 100644
---- a/arch/sh/include/asm/cache.h
-+++ b/arch/sh/include/asm/cache.h
-@@ -9,10 +9,11 @@
- #define __ASM_SH_CACHE_H
- #ifdef __KERNEL__
-
-+#include <linux/const.h>
- #include <linux/init.h>
- #include <cpu/cache.h>
-
--#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
-index 6777177..cb5e44f 100644
---- a/arch/sh/mm/mmap.c
-+++ b/arch/sh/mm/mmap.c
-@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
-@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- if (filp || (flags & MAP_SHARED))
- do_colour_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_colour_align)
- addr = COLOUR_ALIGN(addr, pgoff);
-@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
-- info.low_limit = TASK_UNMAPPED_BASE;
-+ info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
- info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int do_colour_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
-@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (filp || (flags & MAP_SHARED))
- do_colour_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- if (do_colour_align)
-@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (TASK_SIZE - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = TASK_SIZE;
- addr = vm_unmapped_area(&info);
- }
-diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
-index be56a24..eaef2ca 100644
---- a/arch/sparc/include/asm/atomic_64.h
-+++ b/arch/sparc/include/asm/atomic_64.h
-@@ -14,18 +14,40 @@
- #define ATOMIC64_INIT(i) { (i) }
-
- #define atomic_read(v) (*(volatile int *)&(v)->counter)
-+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
-+{
-+ return *(const volatile int *)&v->counter;
-+}
- #define atomic64_read(v) (*(volatile long *)&(v)->counter)
-+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
-+{
-+ return *(const volatile long *)&v->counter;
-+}
-
- #define atomic_set(v, i) (((v)->counter) = i)
-+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
-+{
-+ v->counter = i;
-+}
- #define atomic64_set(v, i) (((v)->counter) = i)
-+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
-+{
-+ v->counter = i;
-+}
-
- extern void atomic_add(int, atomic_t *);
-+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
- extern void atomic64_add(long, atomic64_t *);
-+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
- extern void atomic_sub(int, atomic_t *);
-+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
- extern void atomic64_sub(long, atomic64_t *);
-+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
-
- extern int atomic_add_ret(int, atomic_t *);
-+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
- extern long atomic64_add_ret(long, atomic64_t *);
-+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
- extern int atomic_sub_ret(int, atomic_t *);
- extern long atomic64_sub_ret(long, atomic64_t *);
-
-@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
-
- #define atomic_inc_return(v) atomic_add_ret(1, v)
-+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_add_ret_unchecked(1, v);
-+}
- #define atomic64_inc_return(v) atomic64_add_ret(1, v)
-+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_ret_unchecked(1, v);
-+}
-
- #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
- #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
-
- #define atomic_add_return(i, v) atomic_add_ret(i, v)
-+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
-+{
-+ return atomic_add_ret_unchecked(i, v);
-+}
- #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
-+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
-+{
-+ return atomic64_add_ret_unchecked(i, v);
-+}
-
- /*
- * atomic_inc_and_test - increment and test
-@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- * other cases.
- */
- #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
-+{
-+ return atomic_inc_return_unchecked(v) == 0;
-+}
- #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-
- #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
- #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
-
- #define atomic_inc(v) atomic_add(1, v)
-+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_add_unchecked(1, v);
-+}
- #define atomic64_inc(v) atomic64_add(1, v)
-+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_add_unchecked(1, v);
-+}
-
- #define atomic_dec(v) atomic_sub(1, v)
-+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
-+{
-+ atomic_sub_unchecked(1, v);
-+}
- #define atomic64_dec(v) atomic64_sub(1, v)
-+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
-+{
-+ atomic64_sub_unchecked(1, v);
-+}
-
- #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
- #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
-
- #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
-+{
-+ return cmpxchg(&v->counter, old, new);
-+}
- #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- {
-- int c, old;
-+ int c, old, new;
- c = atomic_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%icc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
-@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
- #define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
- #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
-+{
-+ return xchg(&v->counter, new);
-+}
-
- static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
- {
-- long c, old;
-+ long c, old, new;
- c = atomic64_read(v);
- for (;;) {
-- if (unlikely(c == (u)))
-+ if (unlikely(c == u))
- break;
-- old = atomic64_cmpxchg((v), c, c + (a));
-+
-+ asm volatile("addcc %2, %0, %0\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ "tvs %%xcc, 6\n"
-+#endif
-+
-+ : "=r" (new)
-+ : "0" (c), "ir" (a)
-+ : "cc");
-+
-+ old = atomic64_cmpxchg(v, c, new);
- if (likely(old == c))
- break;
- c = old;
- }
-- return c != (u);
-+ return c != u;
- }
-
- #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
-index b5aad96..99d7465 100644
---- a/arch/sparc/include/asm/barrier_64.h
-+++ b/arch/sparc/include/asm/barrier_64.h
-@@ -57,7 +57,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
- do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
-- ACCESS_ONCE(*p) = (v); \
-+ ACCESS_ONCE_RW(*p) = (v); \
- } while (0)
-
- #define smp_load_acquire(p) \
-diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
-index 5bb6991..5c2132e 100644
---- a/arch/sparc/include/asm/cache.h
-+++ b/arch/sparc/include/asm/cache.h
-@@ -7,10 +7,12 @@
- #ifndef _SPARC_CACHE_H
- #define _SPARC_CACHE_H
-
-+#include <linux/const.h>
-+
- #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-
- #define L1_CACHE_SHIFT 5
--#define L1_CACHE_BYTES 32
-+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
-
- #ifdef CONFIG_SPARC32
- #define SMP_CACHE_BYTES_SHIFT 5
-diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
-index a24e41f..47677ff 100644
---- a/arch/sparc/include/asm/elf_32.h
-+++ b/arch/sparc/include/asm/elf_32.h
-@@ -114,6 +114,13 @@ typedef struct {
-
- #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE 0x10000UL
-+
-+#define PAX_DELTA_MMAP_LEN 16
-+#define PAX_DELTA_STACK_LEN 16
-+#endif
-+
- /* This yields a mask that user programs can use to figure out what
- instruction set this cpu supports. This can NOT be done in userspace
- on Sparc. */
-diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
-index 370ca1e..d4f4a98 100644
---- a/arch/sparc/include/asm/elf_64.h
-+++ b/arch/sparc/include/asm/elf_64.h
-@@ -189,6 +189,13 @@ typedef struct {
- #define ELF_ET_DYN_BASE 0x0000010000000000UL
- #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
-
-+#ifdef CONFIG_PAX_ASLR
-+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
-+
-+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
-+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
-+#endif
-+
- extern unsigned long sparc64_elf_hwcap;
- #define ELF_HWCAP sparc64_elf_hwcap
-
-diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
-index 9b1c36d..209298b 100644
---- a/arch/sparc/include/asm/pgalloc_32.h
-+++ b/arch/sparc/include/asm/pgalloc_32.h
-@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
- }
-
- #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
-+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
-
- static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
- unsigned long address)
-diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
-index 2c8d41f..f337fbc 100644
---- a/arch/sparc/include/asm/pgalloc_64.h
-+++ b/arch/sparc/include/asm/pgalloc_64.h
-@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
- }
-
- #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
-+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
-
- static inline pgd_t *pgd_alloc(struct mm_struct *mm)
- {
-@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
- }
-
- #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
-+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
-
- static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
- {
-diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
-index 59ba6f6..4518128 100644
---- a/arch/sparc/include/asm/pgtable.h
-+++ b/arch/sparc/include/asm/pgtable.h
-@@ -5,4 +5,8 @@
- #else
- #include <asm/pgtable_32.h>
- #endif
-+
-+#define ktla_ktva(addr) (addr)
-+#define ktva_ktla(addr) (addr)
-+
- #endif
-diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
-index 502f632..da1917f 100644
---- a/arch/sparc/include/asm/pgtable_32.h
-+++ b/arch/sparc/include/asm/pgtable_32.h
-@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
- #define PAGE_SHARED SRMMU_PAGE_SHARED
- #define PAGE_COPY SRMMU_PAGE_COPY
- #define PAGE_READONLY SRMMU_PAGE_RDONLY
-+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
-+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
-+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
- #define PAGE_KERNEL SRMMU_PAGE_KERNEL
-
- /* Top-level page directory - dummy used by init-mm.
-@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
-
- /* xwr */
- #define __P000 PAGE_NONE
--#define __P001 PAGE_READONLY
--#define __P010 PAGE_COPY
--#define __P011 PAGE_COPY
-+#define __P001 PAGE_READONLY_NOEXEC
-+#define __P010 PAGE_COPY_NOEXEC
-+#define __P011 PAGE_COPY_NOEXEC
- #define __P100 PAGE_READONLY
- #define __P101 PAGE_READONLY
- #define __P110 PAGE_COPY
- #define __P111 PAGE_COPY
-
- #define __S000 PAGE_NONE
--#define __S001 PAGE_READONLY
--#define __S010 PAGE_SHARED
--#define __S011 PAGE_SHARED
-+#define __S001 PAGE_READONLY_NOEXEC
-+#define __S010 PAGE_SHARED_NOEXEC
-+#define __S011 PAGE_SHARED_NOEXEC
- #define __S100 PAGE_READONLY
- #define __S101 PAGE_READONLY
- #define __S110 PAGE_SHARED
-diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
-index 79da178..c2eede8 100644
---- a/arch/sparc/include/asm/pgtsrmmu.h
-+++ b/arch/sparc/include/asm/pgtsrmmu.h
-@@ -115,6 +115,11 @@
- SRMMU_EXEC | SRMMU_REF)
- #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
- SRMMU_EXEC | SRMMU_REF)
-+
-+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
-+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
-+
- #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
- SRMMU_DIRTY | SRMMU_REF)
-
-diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
-index 9689176..63c18ea 100644
---- a/arch/sparc/include/asm/spinlock_64.h
-+++ b/arch/sparc/include/asm/spinlock_64.h
-@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
-
- /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
-
--static void inline arch_read_lock(arch_rwlock_t *lock)
-+static inline void arch_read_lock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__ (
- "1: ldsw [%2], %0\n"
- " brlz,pn %0, 2f\n"
--"4: add %0, 1, %1\n"
-+"4: addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
- " .previous"
- : "=&r" (tmp1), "=&r" (tmp2)
- : "r" (lock)
-- : "memory");
-+ : "memory", "cc");
- }
-
--static int inline arch_read_trylock(arch_rwlock_t *lock)
-+static inline int arch_read_trylock(arch_rwlock_t *lock)
- {
- int tmp1, tmp2;
-
-@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- "1: ldsw [%2], %0\n"
- " brlz,a,pn %0, 2f\n"
- " mov 0, %0\n"
--" add %0, 1, %1\n"
-+" addcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%icc, 1b\n"
-@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
- return tmp1;
- }
-
--static void inline arch_read_unlock(arch_rwlock_t *lock)
-+static inline void arch_read_unlock(arch_rwlock_t *lock)
- {
- unsigned long tmp1, tmp2;
-
- __asm__ __volatile__(
- "1: lduw [%2], %0\n"
--" sub %0, 1, %1\n"
-+" subcc %0, 1, %1\n"
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+" tvs %%icc, 6\n"
-+#endif
-+
- " cas [%2], %0, %1\n"
- " cmp %0, %1\n"
- " bne,pn %%xcc, 1b\n"
-@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_lock(arch_rwlock_t *lock)
-+static inline void arch_write_lock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2;
-
-@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static void inline arch_write_unlock(arch_rwlock_t *lock)
-+static inline void arch_write_unlock(arch_rwlock_t *lock)
- {
- __asm__ __volatile__(
- " stw %%g0, [%0]"
-@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
- : "memory");
- }
-
--static int inline arch_write_trylock(arch_rwlock_t *lock)
-+static inline int arch_write_trylock(arch_rwlock_t *lock)
- {
- unsigned long mask, tmp1, tmp2, result;
-
-diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
-index 96efa7a..16858bf 100644
---- a/arch/sparc/include/asm/thread_info_32.h
-+++ b/arch/sparc/include/asm/thread_info_32.h
-@@ -49,6 +49,8 @@ struct thread_info {
- unsigned long w_saved;
-
- struct restart_block restart_block;
-+
-+ unsigned long lowest_stack;
- };
-
- /*
-diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
-index cc6275c..7eb8e21 100644
---- a/arch/sparc/include/asm/thread_info_64.h
-+++ b/arch/sparc/include/asm/thread_info_64.h
-@@ -63,6 +63,8 @@ struct thread_info {
- struct pt_regs *kern_una_regs;
- unsigned int kern_una_insn;
-
-+ unsigned long lowest_stack;
-+
- unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
- __attribute__ ((aligned(64)));
- };
-@@ -190,12 +192,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
- /* flag bit 4 is available */
- #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
--/* flag bit 6 is available */
-+#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
- #define TIF_32BIT 7 /* 32-bit binary */
- #define TIF_NOHZ 8 /* in adaptive nohz mode */
- #define TIF_SECCOMP 9 /* secure computing */
- #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
- #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
-+
- /* NOTE: Thread flags >= 12 should be ones we have no interest
- * in using in assembly, else we can't use the mask as
- * an immediate value in instructions such as andcc.
-@@ -215,12 +218,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
- #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
- #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
- #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
-+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
-
- #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
- _TIF_DO_NOTIFY_RESUME_MASK | \
- _TIF_NEED_RESCHED)
- #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
-
-+#define _TIF_WORK_SYSCALL \
-+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
-+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
-+
-+
- /*
- * Thread-synchronous status.
- *
-diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
-index 0167d26..767bb0c 100644
---- a/arch/sparc/include/asm/uaccess.h
-+++ b/arch/sparc/include/asm/uaccess.h
-@@ -1,5 +1,6 @@
- #ifndef ___ASM_SPARC_UACCESS_H
- #define ___ASM_SPARC_UACCESS_H
-+
- #if defined(__sparc__) && defined(__arch64__)
- #include <asm/uaccess_64.h>
- #else
-diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
-index 53a28dd..6e11369 100644
---- a/arch/sparc/include/asm/uaccess_32.h
-+++ b/arch/sparc/include/asm/uaccess_32.h
-@@ -47,6 +47,7 @@
- #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
- #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
- #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
-+#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
- #define access_ok(type, addr, size) \
- ({ (void)(type); __access_ok((unsigned long)(addr), size); })
-
-@@ -250,27 +251,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
-
- static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) to, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) to, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
- return __copy_user(to, (__force void __user *) from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (!__builtin_constant_p(n))
-+ check_object_size(from, n, true);
-+
- return __copy_user(to, (__force void __user *) from, n);
- }
-
- static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-- if (n && __access_ok((unsigned long) from, n))
-+ if ((long)n < 0)
-+ return n;
-+
-+ if (n && __access_ok((unsigned long) from, n)) {
-+ if (!__builtin_constant_p(n))
-+ check_object_size(to, n, false);
- return __copy_user((__force void __user *) to, from, n);
-- else
-+ } else
- return n;
- }
-
- static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
- {
-+ if ((long)n < 0)
-+ return n;
-+
- return __copy_user((__force void __user *) to, from, n);
- }
-
-diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
-index ad7e178..26cd4a7 100644
---- a/arch/sparc/include/asm/uaccess_64.h
-+++ b/arch/sparc/include/asm/uaccess_64.h
-@@ -10,6 +10,7 @@
- #include <linux/compiler.h>
- #include <linux/string.h>
- #include <linux/thread_info.h>
-+#include <linux/kernel.h>
- #include <asm/asi.h>
- #include <asm/spitfire.h>
- #include <asm-generic/uaccess-unaligned.h>
-@@ -54,6 +55,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
- return 1;
- }
-
-+static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
-+{
-+ return 1;
-+}
-+
- static inline int access_ok(int type, const void __user * addr, unsigned long size)
- {
- return 1;
-@@ -214,8 +220,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
- static inline unsigned long __must_check
- copy_from_user(void *to, const void __user *from, unsigned long size)
- {
-- unsigned long ret = ___copy_from_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(to, size, false);
-+
-+ ret = ___copy_from_user(to, from, size);
- if (unlikely(ret))
- ret = copy_from_user_fixup(to, from, size);
-
-@@ -231,8 +244,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
- static inline unsigned long __must_check
- copy_to_user(void __user *to, const void *from, unsigned long size)
- {
-- unsigned long ret = ___copy_to_user(to, from, size);
-+ unsigned long ret;
-
-+ if ((long)size < 0 || size > INT_MAX)
-+ return size;
-+
-+ if (!__builtin_constant_p(size))
-+ check_object_size(from, size, true);
-+
-+ ret = ___copy_to_user(to, from, size);
- if (unlikely(ret))
- ret = copy_to_user_fixup(to, from, size);
- return ret;
-diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
-index d15cc17..d0ae796 100644
---- a/arch/sparc/kernel/Makefile
-+++ b/arch/sparc/kernel/Makefile
-@@ -4,7 +4,7 @@
- #
-
- asflags-y := -ansi
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- extra-y := head_$(BITS).o
-
-diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
-index 510baec..9ff2607 100644
---- a/arch/sparc/kernel/process_32.c
-+++ b/arch/sparc/kernel/process_32.c
-@@ -115,14 +115,14 @@ void show_regs(struct pt_regs *r)
-
- printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
- r->psr, r->pc, r->npc, r->y, print_tainted());
-- printk("PC: <%pS>\n", (void *) r->pc);
-+ printk("PC: <%pA>\n", (void *) r->pc);
- printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
- r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
- printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
- r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
-
- printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
- rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
-@@ -159,7 +159,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- rw = (struct reg_window32 *) fp;
- pc = rw->ins[7];
- printk("[%08lx : ", pc);
-- printk("%pS ] ", (void *) pc);
-+ printk("%pA ] ", (void *) pc);
- fp = rw->ins[6];
- } while (++count < 16);
- printk("\n");
-diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
-index 1a79d68..84423a6 100644
---- a/arch/sparc/kernel/process_64.c
-+++ b/arch/sparc/kernel/process_64.c
-@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
- printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
- rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
- if (regs->tstate & TSTATE_PRIV)
-- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
-+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
- }
-
- void show_regs(struct pt_regs *regs)
-@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
-
- printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
- regs->tpc, regs->tnpc, regs->y, print_tainted());
-- printk("TPC: <%pS>\n", (void *) regs->tpc);
-+ printk("TPC: <%pA>\n", (void *) regs->tpc);
- printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
- regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
- regs->u_regs[3]);
-@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
- printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
- regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
- regs->u_regs[15]);
-- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
-+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
- show_regwindow(regs);
- show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
- }
-@@ -272,7 +272,7 @@ void arch_trigger_all_cpu_backtrace(void)
- ((tp && tp->task) ? tp->task->pid : -1));
-
- if (gp->tstate & TSTATE_PRIV) {
-- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
-+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
- (void *) gp->tpc,
- (void *) gp->o7,
- (void *) gp->i7,
-diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
-index 79cc0d1..ec62734 100644
---- a/arch/sparc/kernel/prom_common.c
-+++ b/arch/sparc/kernel/prom_common.c
-@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
-
- unsigned int prom_early_allocated __initdata;
-
--static struct of_pdt_ops prom_sparc_ops __initdata = {
-+static struct of_pdt_ops prom_sparc_ops __initconst = {
- .nextprop = prom_common_nextprop,
- .getproplen = prom_getproplen,
- .getproperty = prom_getproperty,
-diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
-index c13c9f2..d572c34 100644
---- a/arch/sparc/kernel/ptrace_64.c
-+++ b/arch/sparc/kernel/ptrace_64.c
-@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
- return ret;
- }
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+extern void gr_delayed_cred_worker(void);
-+#endif
-+
- asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- {
- int ret = 0;
-@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
- if (test_thread_flag(TIF_NOHZ))
- user_exit();
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- ret = tracehook_report_syscall_entry(regs);
-
-@@ -1093,6 +1102,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
- if (test_thread_flag(TIF_NOHZ))
- user_exit();
-
-+#ifdef CONFIG_GRKERNSEC_SETXID
-+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
-+ gr_delayed_cred_worker();
-+#endif
-+
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
-index 9af0a5d..06e12f4 100644
---- a/arch/sparc/kernel/smp_64.c
-+++ b/arch/sparc/kernel/smp_64.c
-@@ -874,8 +874,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
- extern unsigned long xcall_flush_dcache_page_spitfire;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
--extern atomic_t dcpage_flushes;
--extern atomic_t dcpage_flushes_xcall;
-+extern atomic_unchecked_t dcpage_flushes;
-+extern atomic_unchecked_t dcpage_flushes_xcall;
- #endif
-
- static inline void __local_flush_dcache_page(struct page *page)
-@@ -899,7 +899,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- return;
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
-
- this_cpu = get_cpu();
-@@ -923,7 +923,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpumask_of(cpu));
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- }
-@@ -942,7 +942,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- preempt_disable();
-
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes);
-+ atomic_inc_unchecked(&dcpage_flushes);
- #endif
- data0 = 0;
- pg_addr = page_address(page);
-@@ -959,7 +959,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
- xcall_deliver(data0, __pa(pg_addr),
- (u64) pg_addr, cpu_online_mask);
- #ifdef CONFIG_DEBUG_DCFLUSH
-- atomic_inc(&dcpage_flushes_xcall);
-+ atomic_inc_unchecked(&dcpage_flushes_xcall);
- #endif
- }
- __local_flush_dcache_page(page);
-diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
-index 3a8d184..49498a8 100644
---- a/arch/sparc/kernel/sys_sparc_32.c
-+++ b/arch/sparc/kernel/sys_sparc_32.c
-@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (len > TASK_SIZE - PAGE_SIZE)
- return -ENOMEM;
- if (!addr)
-- addr = TASK_UNMAPPED_BASE;
-+ addr = current->mm->mmap_base;
-
- info.flags = 0;
- info.length = len;
-diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
-index 25db14a..70162eb 100644
---- a/arch/sparc/kernel/sys_sparc_64.c
-+++ b/arch/sparc/kernel/sys_sparc_64.c
-@@ -88,13 +88,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- struct vm_area_struct * vma;
- unsigned long task_size = TASK_SIZE;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- if (flags & MAP_FIXED) {
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (addr) {
- if (do_color_align)
- addr = COLOR_ALIGN(addr, pgoff);
-@@ -116,22 +121,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
-- info.low_limit = TASK_UNMAPPED_BASE;
-+ info.low_limit = mm->mmap_base;
- info.high_limit = min(task_size, VA_EXCLUDE_START);
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
- VM_BUG_ON(addr != -ENOMEM);
- info.low_limit = VA_EXCLUDE_END;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = task_size;
- addr = vm_unmapped_area(&info);
- }
-@@ -149,6 +160,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- unsigned long task_size = STACK_TOP32;
- unsigned long addr = addr0;
- int do_color_align;
-+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
- struct vm_unmapped_area_info info;
-
- /* This should only ever run for 32-bit processes. */
-@@ -158,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- /* We do not accept a shared mapping if it would violate
- * cache aliasing constraints.
- */
-- if ((flags & MAP_SHARED) &&
-+ if ((filp || (flags & MAP_SHARED)) &&
- ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
- return -EINVAL;
- return addr;
-@@ -171,6 +183,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- if (filp || (flags & MAP_SHARED))
- do_color_align = 1;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- /* requesting a specific address */
- if (addr) {
- if (do_color_align)
-@@ -179,8 +195,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- addr = PAGE_ALIGN(addr);
-
- vma = find_vma(mm, addr);
-- if (task_size - len >= addr &&
-- (!vma || addr + len <= vma->vm_start))
-+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
- return addr;
- }
-
-@@ -190,6 +205,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- info.high_limit = mm->mmap_base;
- info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
- info.align_offset = pgoff << PAGE_SHIFT;
-+ info.threadstack_offset = offset;
- addr = vm_unmapped_area(&info);
-
- /*
-@@ -202,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
- VM_BUG_ON(addr != -ENOMEM);
- info.flags = 0;
- info.low_limit = TASK_UNMAPPED_BASE;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ info.low_limit += mm->delta_mmap;
-+#endif
-+
- info.high_limit = STACK_TOP32;
- addr = vm_unmapped_area(&info);
- }
-@@ -258,10 +280,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
- EXPORT_SYMBOL(get_fb_unmapped_area);
-
- /* Essentially the same as PowerPC. */
--static unsigned long mmap_rnd(void)
-+static unsigned long mmap_rnd(struct mm_struct *mm)
- {
- unsigned long rnd = 0UL;
-
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
-+#endif
-+
- if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
- if (test_thread_flag(TIF_32BIT))
-@@ -274,7 +300,7 @@ static unsigned long mmap_rnd(void)
-
- void arch_pick_mmap_layout(struct mm_struct *mm)
- {
-- unsigned long random_factor = mmap_rnd();
-+ unsigned long random_factor = mmap_rnd(mm);
- unsigned long gap;
-
- /*
-@@ -287,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap == RLIM_INFINITY ||
- sysctl_legacy_va_layout) {
- mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base += mm->delta_mmap;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- /* We know it's 32-bit */
-@@ -298,6 +330,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
- gap = (task_size / 6 * 5);
-
- mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
-+
-+#ifdef CONFIG_PAX_RANDMMAP
-+ if (mm->pax_flags & MF_PAX_RANDMMAP)
-+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
-+#endif
-+
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
- }
-diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
-index 33a17e7..d87fb1f 100644
---- a/arch/sparc/kernel/syscalls.S
-+++ b/arch/sparc/kernel/syscalls.S
-@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
- #endif
- .align 32
- 1: ldx [%g6 + TI_FLAGS], %l5
-- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l5, _TIF_WORK_SYSCALL, %g0
- be,pt %icc, rtrap
- nop
- call syscall_trace_leave
-@@ -184,7 +184,7 @@ linux_sparc_syscall32:
-
- srl %i3, 0, %o3 ! IEU0
- srl %i2, 0, %o2 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace32 ! CTI
- mov %i0, %l5 ! IEU1
- 5: call %l7 ! CTI Group brk forced
-@@ -208,7 +208,7 @@ linux_sparc_syscall:
-
- mov %i3, %o3 ! IEU1
- mov %i4, %o4 ! IEU0 Group
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- bne,pn %icc, linux_syscall_trace ! CTI Group
- mov %i0, %l5 ! IEU0
- 2: call %l7 ! CTI Group brk forced
-@@ -223,7 +223,7 @@ ret_sys_call:
-
- cmp %o0, -ERESTART_RESTARTBLOCK
- bgeu,pn %xcc, 1f
-- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
-+ andcc %l0, _TIF_WORK_SYSCALL, %g0
- ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
-
- 2:
-diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
-index 6629829..036032d 100644
---- a/arch/sparc/kernel/traps_32.c
-+++ b/arch/sparc/kernel/traps_32.c
-@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
- #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
- #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- count++ < 30 &&
- (((unsigned long) rw) >= PAGE_OFFSET) &&
- !(((unsigned long) rw) & 0x7)) {
-- printk("Caller[%08lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
- rw = (struct reg_window32 *)rw->ins[6];
- }
- }
- printk("Instruction DUMP:");
- instruction_dump ((unsigned long *) regs->pc);
-- if(regs->psr & PSR_PS)
-+ if(regs->psr & PSR_PS) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
-
-diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
-index 25d0c7e..b571456 100644
---- a/arch/sparc/kernel/traps_64.c
-+++ b/arch/sparc/kernel/traps_64.c
-@@ -77,7 +77,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
- i + 1,
- p->trapstack[i].tstate, p->trapstack[i].tpc,
- p->trapstack[i].tnpc, p->trapstack[i].tt);
-- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
-+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
- }
- }
-
-@@ -97,6 +97,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
-
- lvl -= 0x100;
- if (regs->tstate & TSTATE_PRIV) {
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- sprintf(buffer, "Kernel bad sw trap %lx", lvl);
- die_if_kernel(buffer, regs);
- }
-@@ -115,11 +121,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
- void bad_trap_tl1(struct pt_regs *regs, long lvl)
- {
- char buffer[32];
--
-+
- if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
- 0, lvl, SIGTRAP) == NOTIFY_STOP)
- return;
-
-+#ifdef CONFIG_PAX_REFCOUNT
-+ if (lvl == 6)
-+ pax_report_refcount_overflow(regs);
-+#endif
-+
- dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-
- sprintf (buffer, "Bad trap %lx at tl>0", lvl);
-@@ -1149,7 +1160,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
- regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
- printk("%s" "ERROR(%d): ",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
-- printk("TPC<%pS>\n", (void *) regs->tpc);
-+ printk("TPC<%pA>\n", (void *) regs->tpc);
- printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
- (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
- (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
-@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
- panic("Irrecoverable Cheetah+ parity error.");
- }
-
-@@ -1764,7 +1775,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
- smp_processor_id(),
- (type & 0x1) ? 'I' : 'D',
- regs->tpc);
-- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
- }
-
- struct sun4v_error_entry {
-@@ -1837,8 +1848,8 @@ struct sun4v_error_entry {
- /*0x38*/u64 reserved_5;
- };
-
--static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
--static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
-+static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
-
- static const char *sun4v_err_type_to_str(u8 type)
- {
-@@ -1930,7 +1941,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
- }
-
- static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-- int cpu, const char *pfx, atomic_t *ocnt)
-+ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
- {
- u64 *raw_ptr = (u64 *) ent;
- u32 attrs;
-@@ -1988,8 +1999,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
-
- show_regs(regs);
-
-- if ((cnt = atomic_read(ocnt)) != 0) {
-- atomic_set(ocnt, 0);
-+ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
-+ atomic_set_unchecked(ocnt, 0);
- wmb();
- printk("%s: Queue overflowed %d times.\n",
- pfx, cnt);
-@@ -2046,7 +2057,7 @@ out:
- */
- void sun4v_resum_overflow(struct pt_regs *regs)
- {
-- atomic_inc(&sun4v_resum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
- }
-
- /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
-@@ -2099,7 +2110,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
- /* XXX Actually even this can make not that much sense. Perhaps
- * XXX we should just pull the plug and panic directly from here?
- */
-- atomic_inc(&sun4v_nonresum_oflow_cnt);
-+ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
- }
-
- static void sun4v_tlb_error(struct pt_regs *regs)
-@@ -2118,9 +2129,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -2141,9 +2152,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
-
- printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
- regs->tpc, tl);
-- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
-+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
- printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
-- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
-+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
- (void *) regs->u_regs[UREG_I7]);
- printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
- "pte[%lx] error[%lx]\n",
-@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
- fp = (unsigned long)sf->fp + STACK_BIAS;
- }
-
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- #ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if ((pc + 8UL) == (unsigned long) &return_to_handler) {
- int index = tsk->curr_ret_stack;
- if (tsk->ret_stack && index >= graph) {
- pc = tsk->ret_stack[index - graph].ret;
-- printk(" [%016lx] %pS\n", pc, (void *) pc);
-+ printk(" [%016lx] %pA\n", pc, (void *) pc);
- graph++;
- }
- }
-@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
- return (struct reg_window *) (fp + STACK_BIAS);
- }
-
-+extern void gr_handle_kernel_exploit(void);
-+
- void die_if_kernel(char *str, struct pt_regs *regs)
- {
- static int die_counter;
-@@ -2414,7 +2427,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- while (rw &&
- count++ < 30 &&
- kstack_valid(tp, (unsigned long) rw)) {
-- printk("Caller[%016lx]: %pS\n", rw->ins[7],
-+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
- (void *) rw->ins[7]);
-
- rw = kernel_stack_up(rw);
-@@ -2427,8 +2440,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
- }
- user_instruction_dump ((unsigned int __user *) regs->tpc);
- }
-- if (regs->tstate & TSTATE_PRIV)
-+ if (regs->tstate & TSTATE_PRIV) {
-+ gr_handle_kernel_exploit();
- do_exit(SIGKILL);
-+ }
- do_exit(SIGSEGV);
- }
- EXPORT_SYMBOL(die_if_kernel);
-diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
-index 35ab8b6..9046547 100644
---- a/arch/sparc/kernel/unaligned_64.c
-+++ b/arch/sparc/kernel/unaligned_64.c
-@@ -295,7 +295,7 @@ static void log_unaligned(struct pt_regs *regs)
- static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
-
- if (__ratelimit(&ratelimit)) {
-- printk("Kernel unaligned access at TPC[%lx] %pS\n",
-+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
- regs->tpc, (void *) regs->tpc);
- }
- }
-diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
-index dbe119b..089c7c1 100644
---- a/arch/sparc/lib/Makefile
-+++ b/arch/sparc/lib/Makefile
-@@ -2,7 +2,7 @@
- #
-
- asflags-y := -ansi -DST_DIV0=0x02
--ccflags-y := -Werror
-+#ccflags-y := -Werror
-
- lib-$(CONFIG_SPARC32) += ashrdi3.o
- lib-$(CONFIG_SPARC32) += memcpy.o memset.o
-diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
-index 85c233d..68500e0 100644
---- a/arch/sparc/lib/atomic_64.S
-+++ b/arch/sparc/lib/atomic_64.S
-@@ -17,7 +17,12 @@
- ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic_add)
-
-+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ add %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic_add_unchecked)
-+
- ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic_sub)
-
-+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ sub %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic_sub_unchecked)
-+
- ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic_add_ret)
-
-+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: lduw [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ cas [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %icc, 2f
-+ add %g7, %o0, %g7
-+ sra %g7, 0, %o0
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic_add_ret_unchecked)
-+
- ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: lduw [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %icc, 6
-+#endif
-+
- cas [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %icc, BACKOFF_LABEL(2f, 1b)
-@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
- ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic64_add)
-
-+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic64_add_unchecked)
-+
- ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic64_sub)
-
-+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ subcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ nop
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic64_sub_unchecked)
-+
- ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- add %g1, %o0, %g7
-+ addcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
- 2: BACKOFF_SPIN(%o2, %o3, 1b)
- ENDPROC(atomic64_add_ret)
-
-+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
-+ BACKOFF_SETUP(%o2)
-+1: ldx [%o1], %g1
-+ addcc %g1, %o0, %g7
-+ casx [%o1], %g1, %g7
-+ cmp %g1, %g7
-+ bne,pn %xcc, 2f
-+ add %g7, %o0, %g7
-+ mov %g7, %o0
-+ retl
-+ nop
-+2: BACKOFF_SPIN(%o2, %o3, 1b)
-+ENDPROC(atomic64_add_ret_unchecked)
-+
- ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
- BACKOFF_SETUP(%o2)
- 1: ldx [%o1], %g1
-- sub %g1, %o0, %g7
-+ subcc %g1, %o0, %g7
-+
-+#ifdef CONFIG_PAX_REFCOUNT
-+ tvs %xcc, 6
-+#endif
-+
- casx [%o1], %g1, %g7
- cmp %g1, %g7
- bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
-diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
-index ac094de..e8ee09d 100644
---- a/arch/sparc/lib/ksyms.c
-+++ b/arch/sparc/lib/ksyms.c
-@@ -100,12 +100,18 @@
EXPORT_SYMBOL(__clear_user); - - /* Atomic counter implementation. */ - EXPORT_SYMBOL(atomic_add); -+EXPORT_SYMBOL(atomic_add_unchecked); - EXPORT_SYMBOL(atomic_add_ret); -+EXPORT_SYMBOL(atomic_add_ret_unchecked); - EXPORT_SYMBOL(atomic_sub); -+EXPORT_SYMBOL(atomic_sub_unchecked); - EXPORT_SYMBOL(atomic_sub_ret); - EXPORT_SYMBOL(atomic64_add); -+EXPORT_SYMBOL(atomic64_add_unchecked); - EXPORT_SYMBOL(atomic64_add_ret); -+EXPORT_SYMBOL(atomic64_add_ret_unchecked); - EXPORT_SYMBOL(atomic64_sub); -+EXPORT_SYMBOL(atomic64_sub_unchecked); - EXPORT_SYMBOL(atomic64_sub_ret); - EXPORT_SYMBOL(atomic64_dec_if_positive); - -diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile -index 30c3ecc..736f015 100644 ---- a/arch/sparc/mm/Makefile -+++ b/arch/sparc/mm/Makefile -@@ -2,7 +2,7 @@ - # - - asflags-y := -ansi --ccflags-y := -Werror -+#ccflags-y := -Werror - - obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o - obj-y += fault_$(BITS).o -diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c -index 163c787..6f9ee6c 100644 ---- a/arch/sparc/mm/fault_32.c -+++ b/arch/sparc/mm/fault_32.c -@@ -21,6 +21,9 @@ - #include <linux/perf_event.h> - #include <linux/interrupt.h> - #include <linux/kdebug.h> -+#include <linux/slab.h> -+#include <linux/pagemap.h> -+#include <linux/compiler.h> - - #include <asm/page.h> - #include <asm/pgtable.h> -@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault) - return safe_compute_effective_address(regs, insn); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+#ifdef CONFIG_PAX_DLRESOLVE -+static void pax_emuplt_close(struct vm_area_struct *vma) -+{ -+ vma->vm_mm->call_dl_resolve = 0UL; -+} -+ -+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ unsigned int *kaddr; -+ -+ vmf->page = alloc_page(GFP_HIGHUSER); -+ if (!vmf->page) -+ return VM_FAULT_OOM; -+ -+ kaddr = kmap(vmf->page); -+ memset(kaddr, 0, PAGE_SIZE); -+ kaddr[0] = 0x9DE3BFA8U; /* save */ -+ flush_dcache_page(vmf->page); -+ kunmap(vmf->page); -+ return VM_FAULT_MAJOR; -+} -+ -+static const struct vm_operations_struct pax_vm_ops = { -+ .close = pax_emuplt_close, -+ .fault = pax_emuplt_fault -+}; -+ -+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) -+{ -+ int ret; -+ -+ INIT_LIST_HEAD(&vma->anon_vma_chain); -+ vma->vm_mm = current->mm; -+ vma->vm_start = addr; -+ vma->vm_end = addr + PAGE_SIZE; -+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; -+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -+ vma->vm_ops = &pax_vm_ops; -+ -+ ret = insert_vm_struct(current->mm, vma); -+ if (ret) -+ return ret; -+ -+ ++current->mm->total_vm; -+ return 0; -+} -+#endif -+ -+/* -+ * PaX: decide what to do with offenders (regs->pc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int sethi1, sethi2, jmpl; -+ -+ err = get_user(sethi1, (unsigned int *)regs->pc); -+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); -+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U) -+ { -+ unsigned int addr; -+ -+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; -+ addr = 
regs->u_regs[UREG_G1]; -+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #2 */ -+ unsigned int ba; -+ -+ err = get_user(ba, (unsigned int *)regs->pc); -+ -+ if (err) -+ break; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { -+ unsigned int addr; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); -+ else -+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #3 */ -+ unsigned int sethi, bajmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->pc); -+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && -+ nop == 0x01000000U) -+ { -+ unsigned int addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) -+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ else -+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 1 */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->pc); -+ err |= get_user(ba, (unsigned int *)(regs->pc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->pc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && -+ nop == 0x01000000U) -+ { -+ unsigned int addr, save, call; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); -+ else -+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); -+ -+ err = get_user(save, (unsigned int *)addr); -+ err |= get_user(call, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ if (err) -+ break; -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ struct vm_area_struct *vma; -+ unsigned long call_dl_resolve; -+ -+ down_read(&current->mm->mmap_sem); -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_read(&current->mm->mmap_sem); -+ if (likely(call_dl_resolve)) -+ goto emulate; -+ -+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ -+ down_write(&current->mm->mmap_sem); -+ if (current->mm->call_dl_resolve) { -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ goto emulate; -+ } -+ -+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); -+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ if (pax_insert_vma(vma, call_dl_resolve)) { -+ up_write(&current->mm->mmap_sem); -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ current->mm->call_dl_resolve = call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ -+emulate: -+ regs->u_regs[UREG_G1] = (sethi &
0x003FFFFFU) << 10; -+ regs->pc = call_dl_resolve; -+ regs->npc = addr+4; -+ return 3; -+ } -+#endif -+ -+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ -+ if ((save & 0xFFC00000U) == 0x05000000U && -+ (call & 0xFFFFE000U) == 0x85C0A000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G2] = addr + 4; -+ addr = (save & 0x003FFFFFU) << 10; -+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); -+ regs->pc = addr; -+ regs->npc = addr+4; -+ return 3; -+ } -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 2 */ -+ unsigned int save, call, nop; -+ -+ err = get_user(save, (unsigned int *)(regs->pc-4)); -+ err |= get_user(call, (unsigned int *)regs->pc); -+ err |= get_user(nop, (unsigned int *)(regs->pc+4)); -+ if (err) -+ break; -+ -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); -+ -+ regs->u_regs[UREG_RETPC] = regs->pc; -+ regs->pc = dl_resolve; -+ regs->npc = dl_resolve+4; -+ return 3; -+ } -+ } while (0); -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, - int text_fault) - { -@@ -229,6 +503,24 @@ good_area: - if (!(vma->vm_flags & VM_WRITE)) - goto bad_area; - } else { -+ -+#ifdef CONFIG_PAX_PAGEEXEC -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - /* Allow reads even for write-only mappings */ - if (!(vma->vm_flags & (VM_READ | VM_EXEC))) - goto bad_area; -diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c -index 0d6de79..32851cb 100644 ---- a/arch/sparc/mm/fault_64.c -+++ b/arch/sparc/mm/fault_64.c -@@ -22,6 +22,9 @@ - #include <linux/kdebug.h> - #include <linux/percpu.h> - #include <linux/context_tracking.h> -+#include <linux/slab.h> -+#include <linux/pagemap.h> -+#include <linux/compiler.h> - - #include <asm/page.h> - #include <asm/pgtable.h> -@@ -75,7 +78,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr) - printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n", - regs->tpc); - printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]); -- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]); -+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]); - printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr); - dump_stack(); - unhandled_fault(regs->tpc, current, regs); -@@ -281,6 +284,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs) - show_regs(regs); - } - -+#ifdef CONFIG_PAX_PAGEEXEC -+#ifdef CONFIG_PAX_DLRESOLVE -+static void pax_emuplt_close(struct vm_area_struct *vma) -+{ -+ vma->vm_mm->call_dl_resolve = 0UL; -+} -+ -+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -+{ -+ unsigned int *kaddr; -+ -+ 
vmf->page = alloc_page(GFP_HIGHUSER); -+ if (!vmf->page) -+ return VM_FAULT_OOM; -+ -+ kaddr = kmap(vmf->page); -+ memset(kaddr, 0, PAGE_SIZE); -+ kaddr[0] = 0x9DE3BFA8U; /* save */ -+ flush_dcache_page(vmf->page); -+ kunmap(vmf->page); -+ return VM_FAULT_MAJOR; -+} -+ -+static const struct vm_operations_struct pax_vm_ops = { -+ .close = pax_emuplt_close, -+ .fault = pax_emuplt_fault -+}; -+ -+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) -+{ -+ int ret; -+ -+ INIT_LIST_HEAD(&vma->anon_vma_chain); -+ vma->vm_mm = current->mm; -+ vma->vm_start = addr; -+ vma->vm_end = addr + PAGE_SIZE; -+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; -+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); -+ vma->vm_ops = &pax_vm_ops; -+ -+ ret = insert_vm_struct(current->mm, vma); -+ if (ret) -+ return ret; -+ -+ ++current->mm->total_vm; -+ return 0; -+} -+#endif -+ -+/* -+ * PaX: decide what to do with offenders (regs->tpc = fault address) -+ * -+ * returns 1 when task should be killed -+ * 2 when patched PLT trampoline was detected -+ * 3 when unpatched PLT trampoline was detected -+ */ -+static int pax_handle_fetch_fault(struct pt_regs *regs) -+{ -+ -+#ifdef CONFIG_PAX_EMUPLT -+ int err; -+ -+ do { /* PaX: patched PLT emulation #1 */ -+ unsigned int sethi1, sethi2, jmpl; -+ -+ err = get_user(sethi1, (unsigned int *)regs->tpc); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x03000000U && -+ (jmpl & 0xFFFFE000U) == 0x81C06000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; -+ addr = regs->u_regs[UREG_G1]; -+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #2 */ -+ unsigned int ba; -+ -+ err = get_user(ba, (unsigned int *)regs->tpc); -+ -+ if (err) -+ break; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) { -+ unsigned long addr; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); -+ else -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #3 */ -+ unsigned int sethi, bajmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U) -+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ else -+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ 
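Every matcher in these pax_handle_fetch_fault() routines (the 32-bit version above and the 64-bit variants that continue below) follows the same pattern: fetch the faulting instruction words with get_user(), compare them against a known PLT sequence under an opcode mask, and on a match redirect the saved PC/nPC pair by hand. A stand-alone sketch of that mask-and-extract step, using the sparc encoding of sethi (op/rd/op2 in bits 31..22, a 22-bit immediate below them; the helper names are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* "sethi %hi(X), %g1" assembles to 0x03000000 | (X >> 10), which is
     * what the (insn & 0xFFC00000U) == 0x03000000U tests above key on. */
    static int is_sethi_g1(uint32_t insn)
    {
        return (insn & 0xFFC00000U) == 0x03000000U;
    }

    static uint32_t sethi_imm(uint32_t insn)
    {
        return (insn & 0x003FFFFFU) << 10;  /* recover %hi(X); low 10 bits are lost */
    }

    int main(void)
    {
        uint32_t insn = 0x03000000U | (0x12345678U >> 10);
        if (is_sethi_g1(insn))
            printf("sethi loads 0x%08x\n", sethi_imm(insn)); /* prints 0x12345400 */
        return 0;
    }

The kernel matchers simply chain several such tests (sethi, ba, jmpl, nop) before trusting the decoded branch target.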
do { /* PaX: patched PLT emulation #4 */ -+ unsigned int sethi, mov1, call, mov2; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(call, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ mov1 == 0x8210000FU && -+ (call & 0xC0000000U) == 0x40000000U && -+ mov2 == 0x9E100001U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; -+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #5 */ -+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); -+ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); -+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ (or1 & 0xFFFFE000U) == 0x82106000U && -+ (or2 & 0xFFFFE000U) == 0x8A116000U && -+ sllx == 0x83287020U && -+ jmpl == 0x81C04005U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); -+ regs->u_regs[UREG_G1] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); -+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: patched PLT emulation #6 */ -+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); -+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); -+ err |= get_user(or, (unsigned int *)(regs->tpc+16)); -+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (sethi1 & 0xFFC00000U) == 0x03000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ sllx == 0x83287020U && -+ (or & 0xFFFFE000U) == 0x8A116000U && -+ jmpl == 0x81C04005U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); -+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+ do { /* PaX: unpatched PLT emulation step 1 */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 
0xFFF80000U) == 0x30680000U) && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ unsigned int save, call; -+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; -+ -+ if ((ba & 0xFFC00000U) == 0x30800000U) -+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); -+ else -+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ err = get_user(save, (unsigned int *)addr); -+ err |= get_user(call, (unsigned int *)(addr+4)); -+ err |= get_user(nop, (unsigned int *)(addr+8)); -+ if (err) -+ break; -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ struct vm_area_struct *vma; -+ unsigned long call_dl_resolve; -+ -+ down_read(&current->mm->mmap_sem); -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_read(&current->mm->mmap_sem); -+ if (likely(call_dl_resolve)) -+ goto emulate; -+ -+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); -+ -+ down_write(&current->mm->mmap_sem); -+ if (current->mm->call_dl_resolve) { -+ call_dl_resolve = current->mm->call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ goto emulate; -+ } -+ -+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); -+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { -+ up_write(&current->mm->mmap_sem); -+ if (vma) -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ if (pax_insert_vma(vma, call_dl_resolve)) { -+ up_write(&current->mm->mmap_sem); -+ kmem_cache_free(vm_area_cachep, vma); -+ return 1; -+ } -+ -+ current->mm->call_dl_resolve = call_dl_resolve; -+ up_write(&current->mm->mmap_sem); -+ -+emulate: -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->tpc = call_dl_resolve; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+#endif -+ -+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ -+ if ((save & 0xFFC00000U) == 0x05000000U && -+ (call & 0xFFFFE000U) == 0x85C0A000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G2] = addr + 4; -+ addr = (save & 0x003FFFFFU) << 10; -+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+ -+ /* PaX: 64-bit PLT stub */ -+ err = get_user(sethi1, (unsigned int *)addr); -+ err |= get_user(sethi2, (unsigned int *)(addr+4)); -+ err |= get_user(or1, (unsigned int *)(addr+8)); -+ err |= get_user(or2, (unsigned int *)(addr+12)); -+ err |= get_user(sllx, (unsigned int *)(addr+16)); -+ err |= get_user(add, (unsigned int *)(addr+20)); -+ err |= get_user(jmpl, (unsigned int *)(addr+24)); -+ err |= get_user(nop, (unsigned int *)(addr+28)); -+ if (err) -+ break; -+ -+ if ((sethi1 & 0xFFC00000U) == 0x09000000U && -+ (sethi2 & 0xFFC00000U) == 0x0B000000U && -+ (or1 & 0xFFFFE000U) == 0x88112000U && -+ (or2 & 0xFFFFE000U) == 0x8A116000U && -+ sllx == 0x89293020U && -+ add == 0x8A010005U && -+ jmpl == 0x89C14000U && -+ nop == 0x01000000U) -+ { -+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); -+ regs->u_regs[UREG_G4] <<= 32; -+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); -+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; -+ regs->u_regs[UREG_G4] = addr + 24; -+ addr =
regs->u_regs[UREG_G5]; -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 3; -+ } -+ } -+ } while (0); -+ -+#ifdef CONFIG_PAX_DLRESOLVE -+ do { /* PaX: unpatched PLT emulation step 2 */ -+ unsigned int save, call, nop; -+ -+ err = get_user(save, (unsigned int *)(regs->tpc-4)); -+ err |= get_user(call, (unsigned int *)regs->tpc); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); -+ if (err) -+ break; -+ -+ if (save == 0x9DE3BFA8U && -+ (call & 0xC0000000U) == 0x40000000U && -+ nop == 0x01000000U) -+ { -+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ dl_resolve &= 0xFFFFFFFFUL; -+ -+ regs->u_regs[UREG_RETPC] = regs->tpc; -+ regs->tpc = dl_resolve; -+ regs->tnpc = dl_resolve+4; -+ return 3; -+ } -+ } while (0); -+#endif -+ -+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ -+ unsigned int sethi, ba, nop; -+ -+ err = get_user(sethi, (unsigned int *)regs->tpc); -+ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); -+ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); -+ -+ if (err) -+ break; -+ -+ if ((sethi & 0xFFC00000U) == 0x03000000U && -+ (ba & 0xFFF00000U) == 0x30600000U && -+ nop == 0x01000000U) -+ { -+ unsigned long addr; -+ -+ addr = (sethi & 0x003FFFFFU) << 10; -+ regs->u_regs[UREG_G1] = addr; -+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); -+ -+ if (test_thread_flag(TIF_32BIT)) -+ addr &= 0xFFFFFFFFUL; -+ -+ regs->tpc = addr; -+ regs->tnpc = addr+4; -+ return 2; -+ } -+ } while (0); -+ -+#endif -+ -+ return 1; -+} -+ -+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp) -+{ -+ unsigned long i; -+ -+ printk(KERN_ERR "PAX: bytes at PC: "); -+ for (i = 0; i < 8; i++) { -+ unsigned int c; -+ if (get_user(c, (unsigned int *)pc+i)) -+ printk(KERN_CONT "???????? "); -+ else -+ printk(KERN_CONT "%08x ", c); -+ } -+ printk("\n"); -+} -+#endif -+ - asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) - { - enum ctx_state prev_state = exception_enter(); -@@ -355,6 +818,29 @@ retry: - if (!vma) - goto bad_area; - -+#ifdef CONFIG_PAX_PAGEEXEC -+ /* PaX: detect ITLB misses on non-exec pages */ -+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && -+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) -+ { -+ if (address != regs->tpc) -+ goto good_area; -+ -+ up_read(&mm->mmap_sem); -+ switch (pax_handle_fetch_fault(regs)) { -+ -+#ifdef CONFIG_PAX_EMUPLT -+ case 2: -+ case 3: -+ return; -+#endif -+ -+ } -+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); -+ do_group_exit(SIGKILL); -+ } -+#endif -+ - /* Pure DTLB misses do not tell us whether the fault causing - * load/store/atomic was a write or not, it only says that there - * was no match. 
So in such a case we (carefully) read the -diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c -index d329537..2c3746a 100644 ---- a/arch/sparc/mm/hugetlbpage.c -+++ b/arch/sparc/mm/hugetlbpage.c -@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, - unsigned long addr, - unsigned long len, - unsigned long pgoff, -- unsigned long flags) -+ unsigned long flags, -+ unsigned long offset) - { -+ struct mm_struct *mm = current->mm; - unsigned long task_size = TASK_SIZE; - struct vm_unmapped_area_info info; - -@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp, - - info.flags = 0; - info.length = len; -- info.low_limit = TASK_UNMAPPED_BASE; -+ info.low_limit = mm->mmap_base; - info.high_limit = min(task_size, VA_EXCLUDE_START); - info.align_mask = PAGE_MASK & ~HPAGE_MASK; - info.align_offset = 0; -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - - if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { - VM_BUG_ON(addr != -ENOMEM); - info.low_limit = VA_EXCLUDE_END; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ info.low_limit += mm->delta_mmap; -+#endif -+ - info.high_limit = task_size; - addr = vm_unmapped_area(&info); - } -@@ -55,7 +64,8 @@ static unsigned long - hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - const unsigned long len, - const unsigned long pgoff, -- const unsigned long flags) -+ const unsigned long flags, -+ const unsigned long offset) - { - struct mm_struct *mm = current->mm; - unsigned long addr = addr0; -@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - info.high_limit = mm->mmap_base; - info.align_mask = PAGE_MASK & ~HPAGE_MASK; - info.align_offset = 0; -+ info.threadstack_offset = offset; - addr = vm_unmapped_area(&info); - - /* -@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, - VM_BUG_ON(addr != -ENOMEM); - info.flags = 0; - info.low_limit = TASK_UNMAPPED_BASE; -+ -+#ifdef CONFIG_PAX_RANDMMAP -+ if (mm->pax_flags & MF_PAX_RANDMMAP) -+ info.low_limit += mm->delta_mmap; -+#endif -+ - info.high_limit = STACK_TOP32; - addr = vm_unmapped_area(&info); - } -@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - unsigned long task_size = TASK_SIZE; -+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags); - - if (test_thread_flag(TIF_32BIT)) - task_size = STACK_TOP32; -@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - return addr; - } - -+#ifdef CONFIG_PAX_RANDMMAP -+ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) -+#endif -+ - if (addr) { - addr = ALIGN(addr, HPAGE_SIZE); - vma = find_vma(mm, addr); -- if (task_size - len >= addr && -- (!vma || addr + len <= vma->vm_start)) -+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset)) - return addr; - } - if (mm->get_unmapped_area == arch_get_unmapped_area) - return hugetlb_get_unmapped_area_bottomup(file, addr, len, -- pgoff, flags); -+ pgoff, flags, offset); - else - return hugetlb_get_unmapped_area_topdown(file, addr, len, -- pgoff, flags); -+ pgoff, flags, offset); - } - - pte_t *huge_pte_alloc(struct mm_struct *mm, -diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c -index 34506f2..0621e68 100644 ---- a/arch/sparc/mm/init_64.c -+++ b/arch/sparc/mm/init_64.c -@@ 
-184,9 +184,9 @@ unsigned long sparc64_kern_sec_context __read_mostly; - int num_kernel_image_mappings; - - #ifdef CONFIG_DEBUG_DCFLUSH --atomic_t dcpage_flushes = ATOMIC_INIT(0); -+atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0); - #ifdef CONFIG_SMP --atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); -+atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0); - #endif - #endif - -@@ -194,7 +194,7 @@ inline void flush_dcache_page_impl(struct page *page) - { - BUG_ON(tlb_type == hypervisor); - #ifdef CONFIG_DEBUG_DCFLUSH -- atomic_inc(&dcpage_flushes); -+ atomic_inc_unchecked(&dcpage_flushes); - #endif - - #ifdef DCACHE_ALIASING_POSSIBLE -@@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m) - - #ifdef CONFIG_DEBUG_DCFLUSH - seq_printf(m, "DCPageFlushes\t: %d\n", -- atomic_read(&dcpage_flushes)); -+ atomic_read_unchecked(&dcpage_flushes)); - #ifdef CONFIG_SMP - seq_printf(m, "DCPageFlushesXC\t: %d\n", -- atomic_read(&dcpage_flushes_xcall)); -+ atomic_read_unchecked(&dcpage_flushes_xcall)); - #endif /* CONFIG_SMP */ - #endif /* CONFIG_DEBUG_DCFLUSH */ - } -diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig -index b3692ce..e4517c9 100644 ---- a/arch/tile/Kconfig -+++ b/arch/tile/Kconfig -@@ -184,6 +184,7 @@ source "kernel/Kconfig.hz" - - config KEXEC - bool "kexec system call" -+ depends on !GRKERNSEC_KMEM - ---help--- - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot -diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h -index ad220ee..2f537b3 100644 ---- a/arch/tile/include/asm/atomic_64.h -+++ b/arch/tile/include/asm/atomic_64.h -@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u) - - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -+#define atomic64_read_unchecked(v) atomic64_read(v) -+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i)) -+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v)) -+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v)) -+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v)) -+#define atomic64_inc_unchecked(v) atomic64_inc(v) -+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v) -+#define atomic64_dec_unchecked(v) atomic64_dec(v) -+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n)) -+ - /* Atomic dec and inc don't implement barrier, so provide them if needed. 
*/ - #define smp_mb__before_atomic_dec() smp_mb() - #define smp_mb__after_atomic_dec() smp_mb() -diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h -index 6160761..00cac88 100644 ---- a/arch/tile/include/asm/cache.h -+++ b/arch/tile/include/asm/cache.h -@@ -15,11 +15,12 @@ - #ifndef _ASM_TILE_CACHE_H - #define _ASM_TILE_CACHE_H - -+#include <linux/const.h> - #include <arch/chip.h> - - /* bytes per L1 data cache line */ - #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - /* bytes per L2 cache line */ - #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() -diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h -index b6cde32..c0cb736 100644 ---- a/arch/tile/include/asm/uaccess.h -+++ b/arch/tile/include/asm/uaccess.h -@@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) - { -- int sz = __compiletime_object_size(to); -+ size_t sz = __compiletime_object_size(to); - -- if (likely(sz == -1 || sz >= n)) -+ if (likely(sz == (size_t)-1 || sz >= n)) - n = _copy_from_user(to, from, n); - else - copy_from_user_overflow(); -diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c -index e514899..f8743c4 100644 ---- a/arch/tile/mm/hugetlbpage.c -+++ b/arch/tile/mm/hugetlbpage.c -@@ -207,6 +207,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, - info.high_limit = TASK_SIZE; - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - info.align_offset = 0; -+ info.threadstack_offset = 0; - return vm_unmapped_area(&info); - } - -@@ -224,6 +225,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, - info.high_limit = current->mm->mmap_base; - info.align_mask = PAGE_MASK & ~huge_page_mask(h); - info.align_offset = 0; -+ info.threadstack_offset = 0; - addr = vm_unmapped_area(&info); - - /* -diff --git a/arch/um/Makefile b/arch/um/Makefile -index 36e658a..71a5c5a 100644 ---- a/arch/um/Makefile -+++ b/arch/um/Makefile -@@ -72,6 +72,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\ - $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \ - $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include - -+ifdef CONSTIFY_PLUGIN -+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify -+endif -+ - #This will adjust *FLAGS accordingly to the platform. - include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS) - -diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h -index 19e1bdd..3665b77 100644 ---- a/arch/um/include/asm/cache.h -+++ b/arch/um/include/asm/cache.h -@@ -1,6 +1,7 @@ - #ifndef __UM_CACHE_H - #define __UM_CACHE_H - -+#include <linux/const.h> - - #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT) - # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) -@@ -12,6 +13,6 @@ - # define L1_CACHE_SHIFT 5 - #endif - --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #endif -diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h -index 2e0a6b1..a64d0f5 100644 ---- a/arch/um/include/asm/kmap_types.h -+++ b/arch/um/include/asm/kmap_types.h -@@ -8,6 +8,6 @@ - - /* No more #include "asm/arch/kmap_types.h" ! 
*/ - --#define KM_TYPE_NR 14 -+#define KM_TYPE_NR 15 - - #endif -diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h -index 5ff53d9..5850cdf 100644 ---- a/arch/um/include/asm/page.h -+++ b/arch/um/include/asm/page.h -@@ -14,6 +14,9 @@ - #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) - #define PAGE_MASK (~(PAGE_SIZE-1)) - -+#define ktla_ktva(addr) (addr) -+#define ktva_ktla(addr) (addr) -+ - #ifndef __ASSEMBLY__ - - struct page; -diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h -index 0032f92..cd151e0 100644 ---- a/arch/um/include/asm/pgtable-3level.h -+++ b/arch/um/include/asm/pgtable-3level.h -@@ -58,6 +58,7 @@ - #define pud_present(x) (pud_val(x) & _PAGE_PRESENT) - #define pud_populate(mm, pud, pmd) \ - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd))) -+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) - - #ifdef CONFIG_64BIT - #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval)) -diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c -index eecc414..48adb87 100644 ---- a/arch/um/kernel/process.c -+++ b/arch/um/kernel/process.c -@@ -356,22 +356,6 @@ int singlestepping(void * t) - return 2; - } - --/* -- * Only x86 and x86_64 have an arch_align_stack(). -- * All other arches have "#define arch_align_stack(x) (x)" -- * in their asm/system.h -- * As this is included in UML from asm-um/system-generic.h, -- * we can use it to behave as the subarch does. -- */ --#ifndef arch_align_stack --unsigned long arch_align_stack(unsigned long sp) --{ -- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) -- sp -= get_random_int() % 8192; -- return sp & ~0xf; --} --#endif -- - unsigned long get_wchan(struct task_struct *p) - { - unsigned long stack_page, sp, ip; -diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h -index ad8f795..2c7eec6 100644 ---- a/arch/unicore32/include/asm/cache.h -+++ b/arch/unicore32/include/asm/cache.h -@@ -12,8 +12,10 @@ - #ifndef __UNICORE_CACHE_H__ - #define __UNICORE_CACHE_H__ - --#define L1_CACHE_SHIFT (5) --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#include <linux/const.h> -+ -+#define L1_CACHE_SHIFT 5 -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - /* - * Memory returned by kmalloc() may be used for DMA, so we must make -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 96e743a..ca34a86 100644 ---- a/arch/x86/Kconfig -+++ b/arch/x86/Kconfig -@@ -22,6 +22,7 @@ config X86_64 - config X86 - def_bool y - select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS -+ select ARCH_HAS_FAST_MULTIPLIER - select ARCH_MIGHT_HAVE_PC_PARPORT - select ARCH_MIGHT_HAVE_PC_SERIO - select HAVE_AOUT if X86_32 -@@ -126,7 +127,7 @@ config X86 - select RTC_LIB - select HAVE_DEBUG_STACKOVERFLOW - select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64 -- select HAVE_CC_STACKPROTECTOR -+ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF - select ARCH_SUPPORTS_ATOMIC_RMW - - config INSTRUCTION_DECODER -@@ -252,7 +253,7 @@ config X86_HT - - config X86_32_LAZY_GS - def_bool y -- depends on X86_32 && !CC_STACKPROTECTOR -+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF - - config ARCH_HWEIGHT_CFLAGS - string -@@ -590,6 +591,7 @@ config SCHED_OMIT_FRAME_POINTER - - menuconfig HYPERVISOR_GUEST - bool "Linux guest support" -+ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN) - ---help--- - Say Y here to enable options for running Linux under 
various hyper- - visors. This option enables basic hypervisor detection and platform -@@ -977,6 +979,7 @@ config VM86 - - config X86_16BIT - bool "Enable support for 16-bit segments" if EXPERT -+ depends on !GRKERNSEC - default y - ---help--- - This option is required by programs like Wine to run 16-bit -@@ -1133,7 +1136,7 @@ choice - - config NOHIGHMEM - bool "off" -- depends on !X86_NUMAQ -+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) - ---help--- - Linux can use up to 64 Gigabytes of physical memory on x86 systems. - However, the address space of 32-bit x86 processors is only 4 -@@ -1170,7 +1173,7 @@ config NOHIGHMEM - - config HIGHMEM4G - bool "4GB" -- depends on !X86_NUMAQ -+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) - ---help--- - Select this if you have a 32-bit processor and between 1 and 4 - gigabytes of physical RAM. -@@ -1223,7 +1226,7 @@ config PAGE_OFFSET - hex - default 0xB0000000 if VMSPLIT_3G_OPT - default 0x80000000 if VMSPLIT_2G -- default 0x78000000 if VMSPLIT_2G_OPT -+ default 0x70000000 if VMSPLIT_2G_OPT - default 0x40000000 if VMSPLIT_1G - default 0xC0000000 - depends on X86_32 -@@ -1628,6 +1631,7 @@ source kernel/Kconfig.hz - - config KEXEC - bool "kexec system call" -+ depends on !GRKERNSEC_KMEM - ---help--- - kexec is a system call that implements the ability to shutdown your - current kernel, and to start another kernel. It is like a reboot -@@ -1779,7 +1783,9 @@ config X86_NEED_RELOCS - - config PHYSICAL_ALIGN - hex "Alignment value to which kernel should be aligned" -- default "0x200000" -+ default "0x1000000" -+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE -+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE - range 0x2000 0x1000000 if X86_32 - range 0x200000 0x1000000 if X86_64 - ---help--- -@@ -1859,9 +1865,10 @@ config DEBUG_HOTPLUG_CPU0 - If unsure, say N. - - config COMPAT_VDSO -- def_bool y -+ def_bool n - prompt "Compat VDSO support" - depends on X86_32 || IA32_EMULATION -+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF - ---help--- - Map the 32-bit VDSO to the predictable old-style address too. - -@@ -1914,6 +1921,22 @@ config CMDLINE_OVERRIDE - This is used to work around broken boot loaders. This should - be set to 'N' under normal conditions. - -+config DEFAULT_MODIFY_LDT_SYSCALL -+ bool "Allow userspace to modify the LDT by default" -+ default y -+ -+ ---help--- -+ Modifying the LDT (Local Descriptor Table) may be needed to run a -+ 16-bit or segmented code such as Dosemu or Wine. This is done via -+ a system call which is not needed to run portable applications, -+ and which can sometimes be abused to exploit some weaknesses of -+ the architecture, opening new vulnerabilities. -+ -+ For this reason this option allows one to enable or disable the -+ feature at runtime. It is recommended to say 'N' here to leave -+ the system protected, and to enable it at runtime only if needed -+ by setting the sys.kernel.modify_ldt sysctl. 
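The effect of that sysctl is easy to observe from user space: attempt a read of the LDT and see whether the call is refused. A minimal probe, assuming an x86 Linux target and the mainline behaviour of failing the syscall with -ENOSYS while the sysctl is 0:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char ldt[64];
        /* func 0 asks the kernel to copy the current LDT into the buffer */
        long n = syscall(SYS_modify_ldt, 0, ldt, sizeof(ldt));
        if (n < 0)
            printf("modify_ldt refused: %s\n", strerror(errno));
        else
            printf("read %ld bytes of LDT\n", n);
        return 0;
    }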
-+ - endmenu - - config ARCH_ENABLE_MEMORY_HOTPLUG -diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu -index f3aaf23..a1d3c49 100644 ---- a/arch/x86/Kconfig.cpu -+++ b/arch/x86/Kconfig.cpu -@@ -319,7 +319,7 @@ config X86_PPRO_FENCE - - config X86_F00F_BUG - def_bool y -- depends on M586MMX || M586TSC || M586 || M486 -+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC - - config X86_INVD_BUG - def_bool y -@@ -327,7 +327,7 @@ config X86_INVD_BUG - - config X86_ALIGNMENT_16 - def_bool y -- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 -+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 - - config X86_INTEL_USERCOPY - def_bool y -@@ -369,7 +369,7 @@ config X86_CMPXCHG64 - # generates cmov. - config X86_CMOV - def_bool y -- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) -+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) - - config X86_MINIMUM_CPU_FAMILY - int -diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug -index 321a52c..3d51a5e 100644 ---- a/arch/x86/Kconfig.debug -+++ b/arch/x86/Kconfig.debug -@@ -84,7 +84,7 @@ config X86_PTDUMP - config DEBUG_RODATA - bool "Write protect kernel read-only data structures" - default y -- depends on DEBUG_KERNEL -+ depends on DEBUG_KERNEL && BROKEN - ---help--- - Mark the kernel read-only data as write-protected in the pagetables, - in order to catch accidental (and incorrect) writes to such const -@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST - - config DEBUG_SET_MODULE_RONX - bool "Set loadable kernel module data as NX and text as RO" -- depends on MODULES -+ depends on MODULES && BROKEN - ---help--- - This option helps catch unintended modifications to loadable - kernel module's text and read-only data. It also prevents execution -diff --git a/arch/x86/Makefile b/arch/x86/Makefile -index 0dd99ea..4a63d82 100644 ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -71,9 +71,6 @@ ifeq ($(CONFIG_X86_32),y) - # CPU-specific tuning. Anything which can be shared with UML should go here. - include $(srctree)/arch/x86/Makefile_32.cpu - KBUILD_CFLAGS += $(cflags-y) -- -- # temporary until string.h is fixed -- KBUILD_CFLAGS += -ffreestanding - else - BITS := 64 - UTS_MACHINE := x86_64 -@@ -112,6 +109,9 @@ else - KBUILD_CFLAGS += -maccumulate-outgoing-args - endif - -+# temporary until string.h is fixed -+KBUILD_CFLAGS += -ffreestanding -+ - # Make sure compiler does not have buggy stack-protector support. - ifdef CONFIG_CC_STACKPROTECTOR - cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh -@@ -269,3 +269,12 @@ define archhelp - echo ' FDINITRD=file initrd for the booted kernel' - echo ' kvmconfig - Enable additional options for guest kernel support' - endef -+ -+define OLD_LD -+ -+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
-+*** Please upgrade your binutils to 2.18 or newer -+endef -+ -+archprepare: -+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) -diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile -index 878df7e..a803913 100644 ---- a/arch/x86/boot/Makefile -+++ b/arch/x86/boot/Makefile -@@ -52,6 +52,9 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE - # --------------------------------------------------------------------------- - - KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -+ifdef CONSTIFY_PLUGIN -+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify -+endif - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n - -diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h -index 878e4b9..20537ab 100644 ---- a/arch/x86/boot/bitops.h -+++ b/arch/x86/boot/bitops.h -@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr) - u8 v; - const u32 *p = (const u32 *)addr; - -- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); -+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); - return v; - } - -@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr) - - static inline void set_bit(int nr, void *addr) - { -- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); -+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); - } - - #endif /* BOOT_BITOPS_H */ -diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h -index 50f8c5e..4f84fff 100644 ---- a/arch/x86/boot/boot.h -+++ b/arch/x86/boot/boot.h -@@ -84,7 +84,7 @@ static inline void io_delay(void) - static inline u16 ds(void) - { - u16 seg; -- asm("movw %%ds,%0" : "=rm" (seg)); -+ asm volatile("movw %%ds,%0" : "=rm" (seg)); - return seg; - } - -@@ -180,7 +180,7 @@ static inline void wrgs32(u32 v, addr_t addr) - static inline int memcmp(const void *s1, const void *s2, size_t len) - { - u8 diff; -- asm("repe; cmpsb; setnz %0" -+ asm volatile("repe; cmpsb; setnz %0" - : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); - return diff; - } -diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile -index 67e9f5c..2af15db 100644 ---- a/arch/x86/boot/compressed/Makefile -+++ b/arch/x86/boot/compressed/Makefile -@@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y) - KBUILD_CFLAGS += -mno-mmx -mno-sse - KBUILD_CFLAGS += $(call cc-option,-ffreestanding) - KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector) -+ifdef CONSTIFY_PLUGIN -+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify -+endif - - KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__ - GCOV_PROFILE := n -diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S -index a53440e..c3dbf1e 100644 ---- a/arch/x86/boot/compressed/efi_stub_32.S -+++ b/arch/x86/boot/compressed/efi_stub_32.S -@@ -46,16 +46,13 @@ ENTRY(efi_call_phys) - * parameter 2, ..., param n. To make things easy, we save the return - * address of efi_call_phys in a global variable. - */ -- popl %ecx -- movl %ecx, saved_return_addr(%edx) -- /* get the function pointer into ECX*/ -- popl %ecx -- movl %ecx, efi_rt_function_ptr(%edx) -+ popl saved_return_addr(%edx) -+ popl efi_rt_function_ptr(%edx) - - /* - * 3. Call the physical function. - */ -- call *%ecx -+ call *efi_rt_function_ptr(%edx) - - /* - * 4. Balance the stack. And because EAX contain the return value, -@@ -67,15 +64,12 @@ ENTRY(efi_call_phys) - 1: popl %edx - subl $1b, %edx - -- movl efi_rt_function_ptr(%edx), %ecx -- pushl %ecx -+ pushl efi_rt_function_ptr(%edx) - - /* - * 10. 
Push the saved return address onto the stack and return. - */ -- movl saved_return_addr(%edx), %ecx -- pushl %ecx -- ret -+ jmpl *saved_return_addr(%edx) - ENDPROC(efi_call_phys) - .previous - -diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S -index a814c80..5df45f6 100644 ---- a/arch/x86/boot/compressed/head_32.S -+++ b/arch/x86/boot/compressed/head_32.S -@@ -119,10 +119,10 @@ preferred_addr: - addl %eax, %ebx - notl %eax - andl %eax, %ebx -- cmpl $LOAD_PHYSICAL_ADDR, %ebx -+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx - jge 1f - #endif -- movl $LOAD_PHYSICAL_ADDR, %ebx -+ movl $____LOAD_PHYSICAL_ADDR, %ebx - 1: - - /* Target address to relocate to for decompression */ -diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S -index 34bbc09..c126b87 100644 ---- a/arch/x86/boot/compressed/head_64.S -+++ b/arch/x86/boot/compressed/head_64.S -@@ -94,10 +94,10 @@ ENTRY(startup_32) - addl %eax, %ebx - notl %eax - andl %eax, %ebx -- cmpl $LOAD_PHYSICAL_ADDR, %ebx -+ cmpl $____LOAD_PHYSICAL_ADDR, %ebx - jge 1f - #endif -- movl $LOAD_PHYSICAL_ADDR, %ebx -+ movl $____LOAD_PHYSICAL_ADDR, %ebx - 1: - - /* Target address to relocate to for decompression */ -@@ -268,10 +268,10 @@ preferred_addr: - addq %rax, %rbp - notq %rax - andq %rax, %rbp -- cmpq $LOAD_PHYSICAL_ADDR, %rbp -+ cmpq $____LOAD_PHYSICAL_ADDR, %rbp - jge 1f - #endif -- movq $LOAD_PHYSICAL_ADDR, %rbp -+ movq $____LOAD_PHYSICAL_ADDR, %rbp - 1: - - /* Target address to relocate to for decompression */ -@@ -366,8 +366,8 @@ gdt: - .long gdt - .word 0 - .quad 0x0000000000000000 /* NULL descriptor */ -- .quad 0x00af9a000000ffff /* __KERNEL_CS */ -- .quad 0x00cf92000000ffff /* __KERNEL_DS */ -+ .quad 0x00af9b000000ffff /* __KERNEL_CS */ -+ .quad 0x00cf93000000ffff /* __KERNEL_DS */ - .quad 0x0080890000000000 /* TS descriptor */ - .quad 0x0000000000000000 /* TS continued */ - gdt_end: -diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c -index 8f45c85..fc8346a 100644 ---- a/arch/x86/boot/compressed/misc.c -+++ b/arch/x86/boot/compressed/misc.c -@@ -218,7 +218,7 @@ void __putstr(const char *s) - - void *memset(void *s, int c, size_t n) - { -- int i; -+ size_t i; - char *ss = s; - - for (i = 0; i < n; i++) -@@ -277,7 +277,7 @@ static void handle_relocations(void *output, unsigned long output_len) - * Calculate the delta between where vmlinux was linked to load - * and where it was actually loaded. - */ -- delta = min_addr - LOAD_PHYSICAL_ADDR; -+ delta = min_addr - ____LOAD_PHYSICAL_ADDR; - if (!delta) { - debug_putstr("No relocation needed... 
"); - return; -@@ -347,7 +347,7 @@ static void parse_elf(void *output) - Elf32_Ehdr ehdr; - Elf32_Phdr *phdrs, *phdr; - #endif -- void *dest; -+ void *dest, *prev; - int i; - - memcpy(&ehdr, output, sizeof(ehdr)); -@@ -374,13 +374,16 @@ static void parse_elf(void *output) - case PT_LOAD: - #ifdef CONFIG_RELOCATABLE - dest = output; -- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); -+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); - #else - dest = (void *)(phdr->p_paddr); - #endif - memcpy(dest, - output + phdr->p_offset, - phdr->p_filesz); -+ if (i) -+ memset(prev, 0xff, dest - prev); -+ prev = dest + phdr->p_filesz; - break; - default: /* Ignore other PT_* */ break; - } -@@ -439,7 +442,7 @@ asmlinkage void *decompress_kernel(void *rmode, memptr heap, - error("Destination address too large"); - #endif - #ifndef CONFIG_RELOCATABLE -- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) -+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) - error("Wrong destination address"); - #endif - -diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c -index 100a9a1..bb3bdb0 100644 ---- a/arch/x86/boot/cpucheck.c -+++ b/arch/x86/boot/cpucheck.c -@@ -117,9 +117,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 ecx = MSR_K7_HWCR; - u32 eax, edx; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - eax &= ~(1 << 15); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - get_cpuflags(); /* Make sure it really did something */ - err = check_cpuflags(); -@@ -132,9 +132,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 ecx = MSR_VIA_FCR; - u32 eax, edx; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); - eax |= (1<<1)|(1<<7); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - set_bit(X86_FEATURE_CX8, cpu.flags); - err = check_cpuflags(); -@@ -145,12 +145,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr) - u32 eax, edx; - u32 level = 1; - -- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); -- asm("cpuid" -+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); -+ asm volatile("cpuid" - : "+a" (level), "=d" (cpu.flags[0]) - : : "ecx", "ebx"); -- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); -+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); - - err = check_cpuflags(); - } -diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S -index 04da6c2..a151f55 100644 ---- a/arch/x86/boot/header.S -+++ b/arch/x86/boot/header.S -@@ -434,10 +434,14 @@ setup_data: .quad 0 # 64-bit physical pointer to - # single linked list of - # struct setup_data - --pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr -+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr - - #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) -+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) -+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR) -+#else - #define VO_INIT_SIZE (VO__end - VO__text) -+#endif - #if ZO_INIT_SIZE > VO_INIT_SIZE - #define INIT_SIZE ZO_INIT_SIZE - #else -diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c -index 
db75d07..8e6d0af 100644 ---- a/arch/x86/boot/memory.c -+++ b/arch/x86/boot/memory.c -@@ -19,7 +19,7 @@ - - static int detect_memory_e820(void) - { -- int count = 0; -+ unsigned int count = 0; - struct biosregs ireg, oreg; - struct e820entry *desc = boot_params.e820_map; - static struct e820entry buf; /* static so it is zeroed */ -diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c -index 11e8c6e..fdbb1ed 100644 ---- a/arch/x86/boot/video-vesa.c -+++ b/arch/x86/boot/video-vesa.c -@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) - - boot_params.screen_info.vesapm_seg = oreg.es; - boot_params.screen_info.vesapm_off = oreg.di; -+ boot_params.screen_info.vesapm_size = oreg.cx; - } - - /* -diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c -index 43eda28..5ab5fdb 100644 ---- a/arch/x86/boot/video.c -+++ b/arch/x86/boot/video.c -@@ -96,7 +96,7 @@ static void store_mode_params(void) - static unsigned int get_entry(void) - { - char entry_buf[4]; -- int i, len = 0; -+ unsigned int i, len = 0; - int key; - unsigned int v; - -diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S -index 9105655..41779c1 100644 ---- a/arch/x86/crypto/aes-x86_64-asm_64.S -+++ b/arch/x86/crypto/aes-x86_64-asm_64.S -@@ -8,6 +8,8 @@ - * including this sentence is retained in full. - */ - -+#include <asm/alternative-asm.h> -+ - .extern crypto_ft_tab - .extern crypto_it_tab - .extern crypto_fl_tab -@@ -70,6 +72,8 @@ - je B192; \ - leaq 32(r9),r9; - -+#define ret pax_force_retaddr; ret -+ - #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \ - movq r1,r2; \ - movq r3,r4; \ -diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S -index 477e9d7..c92c7d8 100644 ---- a/arch/x86/crypto/aesni-intel_asm.S -+++ b/arch/x86/crypto/aesni-intel_asm.S -@@ -31,6 +31,7 @@ - - #include <linux/linkage.h> - #include <asm/inst.h> -+#include <asm/alternative-asm.h> - - #ifdef __x86_64__ - .data -@@ -205,7 +206,7 @@ enc: .octa 0x2 - * num_initial_blocks = b mod 4 - * encrypt the initial num_initial_blocks blocks and apply ghash on - * the ciphertext --* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers -+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers - * are clobbered - * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified - */ -@@ -214,8 +215,8 @@ enc: .octa 0x2 - .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \ - XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation - mov arg7, %r10 # %r10 = AAD -- mov arg8, %r12 # %r12 = aadLen -- mov %r12, %r11 -+ mov arg8, %r15 # %r15 = aadLen -+ mov %r15, %r11 - pxor %xmm\i, %xmm\i - _get_AAD_loop\num_initial_blocks\operation: - movd (%r10), \TMP1 -@@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation: - psrldq $4, %xmm\i - pxor \TMP1, %xmm\i - add $4, %r10 -- sub $4, %r12 -+ sub $4, %r15 - jne _get_AAD_loop\num_initial_blocks\operation - cmp $16, %r11 - je _get_AAD_loop2_done\num_initial_blocks\operation -- mov $16, %r12 -+ mov $16, %r15 - _get_AAD_loop2\num_initial_blocks\operation: - psrldq $4, %xmm\i -- sub $4, %r12 -- cmp %r11, %r12 -+ sub $4, %r15 -+ cmp %r11, %r15 - jne _get_AAD_loop2\num_initial_blocks\operation - _get_AAD_loop2_done\num_initial_blocks\operation: - movdqa SHUF_MASK(%rip), %xmm14 -@@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation: - * num_initial_blocks = b mod 4 - * encrypt the initial num_initial_blocks blocks and apply ghash on - * the ciphertext --* %r10, %r11, %r12, %rax, 
%xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
-+* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
- * are clobbered
- * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
- */
-@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
- .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
- XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
- mov arg7, %r10 # %r10 = AAD
-- mov arg8, %r12 # %r12 = aadLen
-- mov %r12, %r11
-+ mov arg8, %r15 # %r15 = aadLen
-+ mov %r15, %r11
- pxor %xmm\i, %xmm\i
- _get_AAD_loop\num_initial_blocks\operation:
- movd (%r10), \TMP1
-@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
- add $4, %r10
-- sub $4, %r12
-+ sub $4, %r15
- jne _get_AAD_loop\num_initial_blocks\operation
- cmp $16, %r11
- je _get_AAD_loop2_done\num_initial_blocks\operation
-- mov $16, %r12
-+ mov $16, %r15
- _get_AAD_loop2\num_initial_blocks\operation:
- psrldq $4, %xmm\i
-- sub $4, %r12
-- cmp %r11, %r12
-+ sub $4, %r15
-+ cmp %r11, %r15
- jne _get_AAD_loop2\num_initial_blocks\operation
- _get_AAD_loop2_done\num_initial_blocks\operation:
- movdqa SHUF_MASK(%rip), %xmm14
-@@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
- *
- *****************************************************************************/
- ENTRY(aesni_gcm_dec)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
- */
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13 # %xmm13 = HashKey
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13 # %xmm13 = HashKey
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
- movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
- mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
-- mov %r13, %r12
-- and $(3<<4), %r12
-+ mov %r13, %r15
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_decrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_decrypt
- je _initial_num_blocks_is_2_decrypt
- _initial_num_blocks_is_3_decrypt:
-@@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 bytes
-
- movdqa %xmm1, %xmm2
- pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
- pand %xmm1, %xmm2
-@@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_decrypt
- _multiple_of_16_bytes_decrypt:
-- mov arg8, %r12 # %r12 = aadLen (number of bytes)
-- shl $3, %r12 # convert into number of bits
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
-+ shl $3, %r15 # convert into number of bits
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_gcm_dec)
-
-@@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
- * poly = x^128 + x^127 + x^126 + x^121 + 1
- ***************************************************************************/
- ENTRY(aesni_gcm_enc)
-- push %r12
-+ push %r15
- push %r13
- push %r14
- mov %rsp, %r14
-@@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
- #
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp
-- mov %arg6, %r12
-- movdqu (%r12), %xmm13
-+ mov %arg6, %r15
-+ movdqu (%r15), %xmm13
- movdqa SHUF_MASK(%rip), %xmm2
- PSHUFB_XMM %xmm2, %xmm13
-
-@@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
- movdqa %xmm13, HashKey(%rsp)
- mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
- and $-16, %r13
-- mov %r13, %r12
-+ mov %r13, %r15
-
- # Encrypt first few blocks
-
-- and $(3<<4), %r12
-+ and $(3<<4), %r15
- jz _initial_num_blocks_is_0_encrypt
-- cmp $(2<<4), %r12
-+ cmp $(2<<4), %r15
- jb _initial_num_blocks_is_1_encrypt
- je _initial_num_blocks_is_2_encrypt
- _initial_num_blocks_is_3_encrypt:
-@@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
-- lea SHIFT_MASK+16(%rip), %r12
-- sub %r13, %r12
-+ lea SHIFT_MASK+16(%rip), %r15
-+ sub %r13, %r15
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
-- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
-+ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 bytes
- pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
-- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
-+ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
- # get the appropriate mask to mask out top 16-r13 bytes of xmm0
- pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
- movdqa SHUF_MASK(%rip), %xmm10
-@@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
- sub $1, %r13
- jne _less_than_8_bytes_left_encrypt
- _multiple_of_16_bytes_encrypt:
-- mov arg8, %r12 # %r12 = aadLen (number of bytes)
-- shl $3, %r12
-- movd %r12d, %xmm15 # len(A) in %xmm15
-+ mov arg8, %r15 # %r15 = aadLen (number of bytes)
-+ shl $3, %r15
-+ movd %r15d, %xmm15 # len(A) in %xmm15
- shl $3, %arg4 # len(C) in bits (*128)
- MOVQ_R64_XMM %arg4, %xmm1
- pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
-@@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
- mov %r14, %rsp
- pop %r14
- pop %r13
-- pop %r12
-+ pop %r15
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_gcm_enc)
-
-@@ -1722,6 +1725,7 @@ _key_expansion_256a:
- pxor %xmm1, %xmm0
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_128)
- ENDPROC(_key_expansion_256a)
-@@ -1748,6 +1752,7 @@ _key_expansion_192a:
- shufps $0b01001110, %xmm2, %xmm1
- movaps %xmm1, 0x10(TKEYP)
- add $0x20, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_192a)
-
-@@ -1768,6 +1773,7 @@ _key_expansion_192b:
-
- movaps %xmm0, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_192b)
-
-@@ -1781,6 +1787,7 @@ _key_expansion_256b:
- pxor %xmm1, %xmm2
- movaps %xmm2, (TKEYP)
- add $0x10, TKEYP
-+ pax_force_retaddr
- ret
- ENDPROC(_key_expansion_256b)
-
-@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
- #ifndef __x86_64__
- popl KEYP
- #endif
-+ pax_force_retaddr
- ret
- ENDPROC(aesni_set_key)
-
-@@ -1916,6 +1924,7 @@
ENTRY(aesni_enc) - popl KLEN - popl KEYP - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_enc) - -@@ -1974,6 +1983,7 @@ _aesni_enc1: - AESENC KEY STATE - movaps 0x70(TKEYP), KEY - AESENCLAST KEY STATE -+ pax_force_retaddr - ret - ENDPROC(_aesni_enc1) - -@@ -2083,6 +2093,7 @@ _aesni_enc4: - AESENCLAST KEY STATE2 - AESENCLAST KEY STATE3 - AESENCLAST KEY STATE4 -+ pax_force_retaddr - ret - ENDPROC(_aesni_enc4) - -@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec) - popl KLEN - popl KEYP - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_dec) - -@@ -2164,6 +2176,7 @@ _aesni_dec1: - AESDEC KEY STATE - movaps 0x70(TKEYP), KEY - AESDECLAST KEY STATE -+ pax_force_retaddr - ret - ENDPROC(_aesni_dec1) - -@@ -2273,6 +2286,7 @@ _aesni_dec4: - AESDECLAST KEY STATE2 - AESDECLAST KEY STATE3 - AESDECLAST KEY STATE4 -+ pax_force_retaddr - ret - ENDPROC(_aesni_dec4) - -@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc) - popl KEYP - popl LEN - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_ecb_enc) - -@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec) - popl KEYP - popl LEN - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_ecb_dec) - -@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc) - popl LEN - popl IVP - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_cbc_enc) - -@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec) - popl LEN - popl IVP - #endif -+ pax_force_retaddr - ret - ENDPROC(aesni_cbc_dec) - -@@ -2550,6 +2568,7 @@ _aesni_inc_init: - mov $1, TCTR_LOW - MOVQ_R64_XMM TCTR_LOW INC - MOVQ_R64_XMM CTR TCTR_LOW -+ pax_force_retaddr - ret - ENDPROC(_aesni_inc_init) - -@@ -2579,6 +2598,7 @@ _aesni_inc: - .Linc_low: - movaps CTR, IV - PSHUFB_XMM BSWAP_MASK IV -+ pax_force_retaddr - ret - ENDPROC(_aesni_inc) - -@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc) - .Lctr_enc_ret: - movups IV, (IVP) - .Lctr_enc_just_ret: -+ pax_force_retaddr - ret - ENDPROC(aesni_ctr_enc) - -@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8) - pxor INC, STATE4 - movdqu STATE4, 0x70(OUTP) - -+ pax_force_retaddr - ret - ENDPROC(aesni_xts_crypt8) - -diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S -index 246c670..466e2d6 100644 ---- a/arch/x86/crypto/blowfish-x86_64-asm_64.S -+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S -@@ -21,6 +21,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .file "blowfish-x86_64-asm.S" - .text -@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk) - jnz .L__enc_xor; - - write_block(); -+ pax_force_retaddr - ret; - .L__enc_xor: - xor_block(); -+ pax_force_retaddr - ret; - ENDPROC(__blowfish_enc_blk) - -@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk) - - movq %r11, %rbp; - -+ pax_force_retaddr - ret; - ENDPROC(blowfish_dec_blk) - -@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way) - - popq %rbx; - popq %rbp; -+ pax_force_retaddr - ret; - - .L__enc_xor4: -@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way) - - popq %rbx; - popq %rbp; -+ pax_force_retaddr - ret; - ENDPROC(__blowfish_enc_blk_4way) - -@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way) - popq %rbx; - popq %rbp; - -+ pax_force_retaddr - ret; - ENDPROC(blowfish_dec_blk_4way) -diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S -index ce71f92..1dce7ec 100644 ---- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S -+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S -@@ -16,6 +16,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - #define CAMELLIA_TABLE_BYTE_LEN 272 - -@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: - 
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, - %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, - %rcx, (%r9)); -+ pax_force_retaddr - ret; - ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) - -@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: - roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3, - %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11, - %rax, (%r9)); -+ pax_force_retaddr - ret; - ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) - -@@ -780,6 +783,7 @@ __camellia_enc_blk16: - %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, - %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); - -+ pax_force_retaddr - ret; - - .align 8 -@@ -865,6 +869,7 @@ __camellia_dec_blk16: - %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, - %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); - -+ pax_force_retaddr - ret; - - .align 8 -@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way) - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - -+ pax_force_retaddr - ret; - ENDPROC(camellia_ecb_enc_16way) - -@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way) - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - -+ pax_force_retaddr - ret; - ENDPROC(camellia_ecb_dec_16way) - -@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way) - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - -+ pax_force_retaddr - ret; - ENDPROC(camellia_cbc_dec_16way) - -@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way) - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - -+ pax_force_retaddr - ret; - ENDPROC(camellia_ctr_16way) - -@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way: - %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, - %xmm8, %rsi); - -+ pax_force_retaddr - ret; - ENDPROC(camellia_xts_crypt_16way) - -diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -index 0e0b886..5a3123c 100644 ---- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S -@@ -11,6 +11,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - #define CAMELLIA_TABLE_BYTE_LEN 272 - -@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd: - roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, - %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, - %rcx, (%r9)); -+ pax_force_retaddr - ret; - ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd) - -@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab: - roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3, - %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11, - %rax, (%r9)); -+ pax_force_retaddr - ret; - ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab) - -@@ -820,6 +823,7 @@ __camellia_enc_blk32: - %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, - %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); - -+ pax_force_retaddr - ret; - - .align 8 -@@ -905,6 +909,7 @@ __camellia_dec_blk32: - %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, - %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); - -+ pax_force_retaddr - ret; - - .align 8 -@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(camellia_ecb_enc_32way) - -@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way) - - vzeroupper; - 
-+ pax_force_retaddr - ret; - ENDPROC(camellia_ecb_dec_32way) - -@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(camellia_cbc_dec_32way) - -@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(camellia_ctr_32way) - -@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way: - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(camellia_xts_crypt_32way) - -diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S -index 310319c..db3d7b5 100644 ---- a/arch/x86/crypto/camellia-x86_64-asm_64.S -+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S -@@ -21,6 +21,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .file "camellia-x86_64-asm_64.S" - .text -@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk) - enc_outunpack(mov, RT1); - - movq RRBP, %rbp; -+ pax_force_retaddr - ret; - - .L__enc_xor: - enc_outunpack(xor, RT1); - - movq RRBP, %rbp; -+ pax_force_retaddr - ret; - ENDPROC(__camellia_enc_blk) - -@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk) - dec_outunpack(); - - movq RRBP, %rbp; -+ pax_force_retaddr - ret; - ENDPROC(camellia_dec_blk) - -@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way) - - movq RRBP, %rbp; - popq %rbx; -+ pax_force_retaddr - ret; - - .L__enc2_xor: -@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way) - - movq RRBP, %rbp; - popq %rbx; -+ pax_force_retaddr - ret; - ENDPROC(__camellia_enc_blk_2way) - -@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way) - - movq RRBP, %rbp; - movq RXOR, %rbx; -+ pax_force_retaddr - ret; - ENDPROC(camellia_dec_blk_2way) -diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -index c35fd5d..2d8c7db 100644 ---- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S -@@ -24,6 +24,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .file "cast5-avx-x86_64-asm_64.S" - -@@ -281,6 +282,7 @@ __cast5_enc_blk16: - outunpack_blocks(RR3, RL3, RTMP, RX, RKM); - outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - -+ pax_force_retaddr - ret; - ENDPROC(__cast5_enc_blk16) - -@@ -352,6 +354,7 @@ __cast5_dec_blk16: - outunpack_blocks(RR3, RL3, RTMP, RX, RKM); - outunpack_blocks(RR4, RL4, RTMP, RX, RKM); - -+ pax_force_retaddr - ret; - - .L__skip_dec: -@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way) - vmovdqu RR4, (6*4*4)(%r11); - vmovdqu RL4, (7*4*4)(%r11); - -+ pax_force_retaddr - ret; - ENDPROC(cast5_ecb_enc_16way) - -@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way) - vmovdqu RR4, (6*4*4)(%r11); - vmovdqu RL4, (7*4*4)(%r11); - -+ pax_force_retaddr - ret; - ENDPROC(cast5_ecb_dec_16way) - -@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way) - * %rdx: src - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - vmovdqu (0*16)(%rdx), RL1; - vmovdqu (1*16)(%rdx), RR1; -@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way) - call __cast5_dec_blk16; - - /* xor with src */ -- vmovq (%r12), RX; -+ vmovq (%r14), RX; - vpshufd $0x4f, RX, RX; - vpxor RX, RR1, RR1; -- vpxor 0*16+8(%r12), RL1, RL1; -- vpxor 1*16+8(%r12), RR2, RR2; -- vpxor 2*16+8(%r12), RL2, RL2; -- vpxor 3*16+8(%r12), RR3, RR3; -- vpxor 4*16+8(%r12), RL3, RL3; -- vpxor 5*16+8(%r12), RR4, RR4; -- vpxor 6*16+8(%r12), RL4, RL4; -+ vpxor 0*16+8(%r14), RL1, RL1; -+ vpxor 1*16+8(%r14), RR2, RR2; -+ vpxor 2*16+8(%r14), RL2, RL2; -+ vpxor 3*16+8(%r14), RR3, RR3; -+ vpxor 4*16+8(%r14), RL3, RL3; -+ vpxor 5*16+8(%r14), RR4, RR4; 
-+ vpxor 6*16+8(%r14), RL4, RL4; - - vmovdqu RR1, (0*16)(%r11); - vmovdqu RL1, (1*16)(%r11); -@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way) - vmovdqu RR4, (6*16)(%r11); - vmovdqu RL4, (7*16)(%r11); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(cast5_cbc_dec_16way) - -@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way) - * %rcx: iv (big endian, 64bit) - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - vpcmpeqd RTMP, RTMP, RTMP; - vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */ -@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way) - call __cast5_enc_blk16; - - /* dst = src ^ iv */ -- vpxor (0*16)(%r12), RR1, RR1; -- vpxor (1*16)(%r12), RL1, RL1; -- vpxor (2*16)(%r12), RR2, RR2; -- vpxor (3*16)(%r12), RL2, RL2; -- vpxor (4*16)(%r12), RR3, RR3; -- vpxor (5*16)(%r12), RL3, RL3; -- vpxor (6*16)(%r12), RR4, RR4; -- vpxor (7*16)(%r12), RL4, RL4; -+ vpxor (0*16)(%r14), RR1, RR1; -+ vpxor (1*16)(%r14), RL1, RL1; -+ vpxor (2*16)(%r14), RR2, RR2; -+ vpxor (3*16)(%r14), RL2, RL2; -+ vpxor (4*16)(%r14), RR3, RR3; -+ vpxor (5*16)(%r14), RL3, RL3; -+ vpxor (6*16)(%r14), RR4, RR4; -+ vpxor (7*16)(%r14), RL4, RL4; - vmovdqu RR1, (0*16)(%r11); - vmovdqu RL1, (1*16)(%r11); - vmovdqu RR2, (2*16)(%r11); -@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way) - vmovdqu RR4, (6*16)(%r11); - vmovdqu RL4, (7*16)(%r11); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(cast5_ctr_16way) -diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -index e3531f8..e123f35 100644 ---- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S -@@ -24,6 +24,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - #include "glue_helper-asm-avx.S" - - .file "cast6-avx-x86_64-asm_64.S" -@@ -295,6 +296,7 @@ __cast6_enc_blk8: - outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); - outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - -+ pax_force_retaddr - ret; - ENDPROC(__cast6_enc_blk8) - -@@ -340,6 +342,7 @@ __cast6_dec_blk8: - outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM); - outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM); - -+ pax_force_retaddr - ret; - ENDPROC(__cast6_dec_blk8) - -@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way) - - store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(cast6_ecb_enc_8way) - -@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way) - - store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(cast6_ecb_dec_8way) - -@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way) - * %rdx: src - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - - call __cast6_dec_blk8; - -- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); -+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(cast6_cbc_dec_8way) - -@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way) - * %rcx: iv (little endian, 128bit) - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RX, RKR, RKM); - - call __cast6_enc_blk8; - -- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); -+ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, 
RD2); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(cast6_ctr_8way) - -@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(cast6_xts_enc_8way) - -@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(cast6_xts_dec_8way) -diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -index dbc4339..de6e120 100644 ---- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S -@@ -45,6 +45,7 @@ - - #include <asm/inst.h> - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction - -@@ -312,6 +313,7 @@ do_return: - popq %rsi - popq %rdi - popq %rbx -+ pax_force_retaddr - ret - - ################################################################ -diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S -index 185fad4..ff4cd36 100644 ---- a/arch/x86/crypto/ghash-clmulni-intel_asm.S -+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S -@@ -18,6 +18,7 @@ - - #include <linux/linkage.h> - #include <asm/inst.h> -+#include <asm/alternative-asm.h> - - .data - -@@ -89,6 +90,7 @@ __clmul_gf128mul_ble: - psrlq $1, T2 - pxor T2, T1 - pxor T1, DATA -+ pax_force_retaddr - ret - ENDPROC(__clmul_gf128mul_ble) - -@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul) - call __clmul_gf128mul_ble - PSHUFB_XMM BSWAP DATA - movups DATA, (%rdi) -+ pax_force_retaddr - ret - ENDPROC(clmul_ghash_mul) - -@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update) - PSHUFB_XMM BSWAP DATA - movups DATA, (%rdi) - .Lupdate_just_ret: -+ pax_force_retaddr - ret - ENDPROC(clmul_ghash_update) -diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S -index 9279e0b..c4b3d2c 100644 ---- a/arch/x86/crypto/salsa20-x86_64-asm_64.S -+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S -@@ -1,4 +1,5 @@ - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - # enter salsa20_encrypt_bytes - ENTRY(salsa20_encrypt_bytes) -@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes) - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr - ret - # bytesatleast65: - ._bytesatleast65: -@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup) - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr - ret - ENDPROC(salsa20_keysetup) - -@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup) - add %r11,%rsp - mov %rdi,%rax - mov %rsi,%rdx -+ pax_force_retaddr - ret - ENDPROC(salsa20_ivsetup) -diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -index 2f202f4..d9164d6 100644 ---- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S -@@ -24,6 +24,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - #include "glue_helper-asm-avx.S" - - .file "serpent-avx-x86_64-asm_64.S" -@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx: - write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__serpent_enc_blk8_avx) - -@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx: - write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - 
ENDPROC(__serpent_dec_blk8_avx) - -@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx) - - store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ecb_enc_8way_avx) - -@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx) - - store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ecb_dec_8way_avx) - -@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx) - - store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_cbc_dec_8way_avx) - -@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx) - - store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ctr_8way_avx) - -@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_xts_enc_8way_avx) - -@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_xts_dec_8way_avx) -diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S -index b222085..abd483c 100644 ---- a/arch/x86/crypto/serpent-avx2-asm_64.S -+++ b/arch/x86/crypto/serpent-avx2-asm_64.S -@@ -15,6 +15,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - #include "glue_helper-asm-avx2.S" - - .file "serpent-avx2-asm_64.S" -@@ -610,6 +611,7 @@ __serpent_enc_blk16: - write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__serpent_enc_blk16) - -@@ -664,6 +666,7 @@ __serpent_dec_blk16: - write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__serpent_dec_blk16) - -@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ecb_enc_16way) - -@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ecb_dec_16way) - -@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_cbc_dec_16way) - -@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_ctr_16way) - -@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_xts_enc_16way) - -@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way) - - vzeroupper; - -+ pax_force_retaddr - ret; - ENDPROC(serpent_xts_dec_16way) -diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -index acc066c..1559cc4 100644 ---- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S -@@ -25,6 +25,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .file "serpent-sse2-x86_64-asm_64.S" - .text -@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way) - write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); - write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - - .L__enc_xor8: - xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2); - xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__serpent_enc_blk_8way) - 
-@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way) - write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2); - write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(serpent_dec_blk_8way) -diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S -index a410950..9dfe7ad 100644 ---- a/arch/x86/crypto/sha1_ssse3_asm.S -+++ b/arch/x86/crypto/sha1_ssse3_asm.S -@@ -29,6 +29,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - #define CTX %rdi // arg1 - #define BUF %rsi // arg2 -@@ -75,9 +76,9 @@ - - push %rbx - push %rbp -- push %r12 -+ push %r14 - -- mov %rsp, %r12 -+ mov %rsp, %r14 - sub $64, %rsp # allocate workspace - and $~15, %rsp # align stack - -@@ -99,11 +100,12 @@ - xor %rax, %rax - rep stosq - -- mov %r12, %rsp # deallocate workspace -+ mov %r14, %rsp # deallocate workspace - -- pop %r12 -+ pop %r14 - pop %rbp - pop %rbx -+ pax_force_retaddr - ret - - ENDPROC(\name) -diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S -index 642f156..51a513c 100644 ---- a/arch/x86/crypto/sha256-avx-asm.S -+++ b/arch/x86/crypto/sha256-avx-asm.S -@@ -49,6 +49,7 @@ - - #ifdef CONFIG_AS_AVX - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - ## assume buffers not aligned - #define VMOVDQ vmovdqu -@@ -460,6 +461,7 @@ done_hash: - popq %r13 - popq %rbp - popq %rbx -+ pax_force_retaddr - ret - ENDPROC(sha256_transform_avx) - -diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S -index 9e86944..3795e6a 100644 ---- a/arch/x86/crypto/sha256-avx2-asm.S -+++ b/arch/x86/crypto/sha256-avx2-asm.S -@@ -50,6 +50,7 @@ - - #ifdef CONFIG_AS_AVX2 - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - ## assume buffers not aligned - #define VMOVDQ vmovdqu -@@ -720,6 +721,7 @@ done_hash: - popq %r12 - popq %rbp - popq %rbx -+ pax_force_retaddr - ret - ENDPROC(sha256_transform_rorx) - -diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S -index f833b74..8c62a9e 100644 ---- a/arch/x86/crypto/sha256-ssse3-asm.S -+++ b/arch/x86/crypto/sha256-ssse3-asm.S -@@ -47,6 +47,7 @@ - ######################################################################## - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - ## assume buffers not aligned - #define MOVDQ movdqu -@@ -471,6 +472,7 @@ done_hash: - popq %rbp - popq %rbx - -+ pax_force_retaddr - ret - ENDPROC(sha256_transform_ssse3) - -diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S -index 974dde9..a823ff9 100644 ---- a/arch/x86/crypto/sha512-avx-asm.S -+++ b/arch/x86/crypto/sha512-avx-asm.S -@@ -49,6 +49,7 @@ - - #ifdef CONFIG_AS_AVX - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .text - -@@ -364,6 +365,7 @@ updateblock: - mov frame_RSPSAVE(%rsp), %rsp - - nowork: -+ pax_force_retaddr - ret - ENDPROC(sha512_transform_avx) - -diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S -index 568b961..ed20c37 100644 ---- a/arch/x86/crypto/sha512-avx2-asm.S -+++ b/arch/x86/crypto/sha512-avx2-asm.S -@@ -51,6 +51,7 @@ - - #ifdef CONFIG_AS_AVX2 - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .text - -@@ -678,6 +679,7 @@ done_hash: - - # Restore Stack Pointer - mov frame_RSPSAVE(%rsp), %rsp -+ pax_force_retaddr - ret - ENDPROC(sha512_transform_rorx) - -diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S -index fb56855..6edd768 100644 ---- 
a/arch/x86/crypto/sha512-ssse3-asm.S -+++ b/arch/x86/crypto/sha512-ssse3-asm.S -@@ -48,6 +48,7 @@ - ######################################################################## - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .text - -@@ -363,6 +364,7 @@ updateblock: - mov frame_RSPSAVE(%rsp), %rsp - - nowork: -+ pax_force_retaddr - ret - ENDPROC(sha512_transform_ssse3) - -diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -index 0505813..b067311 100644 ---- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S -@@ -24,6 +24,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - #include "glue_helper-asm-avx.S" - - .file "twofish-avx-x86_64-asm_64.S" -@@ -284,6 +285,7 @@ __twofish_enc_blk8: - outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2); - outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__twofish_enc_blk8) - -@@ -324,6 +326,7 @@ __twofish_dec_blk8: - outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2); - outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2); - -+ pax_force_retaddr - ret; - ENDPROC(__twofish_dec_blk8) - -@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way) - - store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - -+ pax_force_retaddr - ret; - ENDPROC(twofish_ecb_enc_8way) - -@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way) - - store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(twofish_ecb_dec_8way) - -@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way) - * %rdx: src - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - - call __twofish_dec_blk8; - -- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); -+ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(twofish_cbc_dec_8way) - -@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way) - * %rcx: iv (little endian, 128bit) - */ - -- pushq %r12; -+ pushq %r14; - - movq %rsi, %r11; -- movq %rdx, %r12; -+ movq %rdx, %r14; - - load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2, - RD2, RX0, RX1, RY0); - - call __twofish_enc_blk8; - -- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); -+ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - -- popq %r12; -+ popq %r14; - -+ pax_force_retaddr - ret; - ENDPROC(twofish_ctr_8way) - -@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2); - -+ pax_force_retaddr - ret; - ENDPROC(twofish_xts_enc_8way) - -@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way) - /* dst <= regs xor IVs(in dst) */ - store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2); - -+ pax_force_retaddr - ret; - ENDPROC(twofish_xts_dec_8way) -diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -index 1c3b7ce..02f578d 100644 ---- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S -@@ -21,6 +21,7 @@ - */ - - #include <linux/linkage.h> -+#include <asm/alternative-asm.h> - - .file "twofish-x86_64-asm-3way.S" - .text -@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way) - popq %r13; - popq %r14; - popq %r15; -+ pax_force_retaddr - ret; - - 
.L__enc_xor3: -@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way) - popq %r13; - popq %r14; - popq %r15; -+ pax_force_retaddr - ret; - ENDPROC(__twofish_enc_blk_3way) - -@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way) - popq %r13; - popq %r14; - popq %r15; -+ pax_force_retaddr - ret; - ENDPROC(twofish_dec_blk_3way) -diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S -index a039d21..524b8b2 100644 ---- a/arch/x86/crypto/twofish-x86_64-asm_64.S -+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S -@@ -22,6 +22,7 @@ - - #include <linux/linkage.h> - #include <asm/asm-offsets.h> -+#include <asm/alternative-asm.h> - - #define a_offset 0 - #define b_offset 4 -@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk) - - popq R1 - movq $1,%rax -+ pax_force_retaddr - ret - ENDPROC(twofish_enc_blk) - -@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk) - - popq R1 - movq $1,%rax -+ pax_force_retaddr - ret - ENDPROC(twofish_dec_blk) -diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c -index d21ff89..6da8e6e 100644 ---- a/arch/x86/ia32/ia32_aout.c -+++ b/arch/x86/ia32/ia32_aout.c -@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm) - unsigned long dump_start, dump_size; - struct user32 dump; - -+ memset(&dump, 0, sizeof(dump)); -+ - fs = get_fs(); - set_fs(KERNEL_DS); - has_dumped = 1; -diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c -index 2206757..85cbcfa 100644 ---- a/arch/x86/ia32/ia32_signal.c -+++ b/arch/x86/ia32/ia32_signal.c -@@ -218,7 +218,7 @@ asmlinkage long sys32_sigreturn(void) - if (__get_user(set.sig[0], &frame->sc.oldmask) - || (_COMPAT_NSIG_WORDS > 1 - && __copy_from_user((((char *) &set.sig) + 4), -- &frame->extramask, -+ frame->extramask, - sizeof(frame->extramask)))) - goto badframe; - -@@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, - sp -= frame_size; - /* Align the stack pointer according to the i386 ABI, - * i.e. so that on function entry ((sp + 4) & 15) == 0. */ -- sp = ((sp + 4) & -16ul) - 4; -+ sp = ((sp - 12) & -16ul) - 4; - return (void __user *) sp; - } - -@@ -386,7 +386,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, - restorer = VDSO32_SYMBOL(current->mm->context.vdso, - sigreturn); - else -- restorer = &frame->retcode; -+ restorer = frame->retcode; - } - - put_user_try { -@@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig, - * These are actually not used anymore, but left because some - * gdb versions depend on them as a marker. - */ -- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); -+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); - } put_user_catch(err); - - if (err) -@@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, - 0xb8, - __NR_ia32_rt_sigreturn, - 0x80cd, -- 0, -+ 0 - }; - - frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate); -@@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig, - - if (ksig->ka.sa.sa_flags & SA_RESTORER) - restorer = ksig->ka.sa.sa_restorer; -+ else if (current->mm->context.vdso) -+ /* Return stub is in 32bit vsyscall page */ -+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); - else -- restorer = VDSO32_SYMBOL(current->mm->context.vdso, -- rt_sigreturn); -+ restorer = frame->retcode; - put_user_ex(ptr_to_compat(restorer), &frame->pretcode); - - /* - * Not actually used anymore, but left because some gdb - * versions need it. 
- */ -- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); -+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode); - } put_user_catch(err); - - err |= copy_siginfo_to_user32(&frame->info, &ksig->info); -diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S -index 92a2e93..9b829fa 100644 ---- a/arch/x86/ia32/ia32entry.S -+++ b/arch/x86/ia32/ia32entry.S -@@ -15,8 +15,10 @@ - #include <asm/irqflags.h> - #include <asm/asm.h> - #include <asm/smap.h> -+#include <asm/pgtable.h> - #include <linux/linkage.h> - #include <linux/err.h> -+#include <asm/alternative-asm.h> - - /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ - #include <linux/elf-em.h> -@@ -62,12 +64,12 @@ - */ - .macro LOAD_ARGS32 offset, _r9=0 - .if \_r9 -- movl \offset+16(%rsp),%r9d -+ movl \offset+R9(%rsp),%r9d - .endif -- movl \offset+40(%rsp),%ecx -- movl \offset+48(%rsp),%edx -- movl \offset+56(%rsp),%esi -- movl \offset+64(%rsp),%edi -+ movl \offset+RCX(%rsp),%ecx -+ movl \offset+RDX(%rsp),%edx -+ movl \offset+RSI(%rsp),%esi -+ movl \offset+RDI(%rsp),%edi - movl %eax,%eax /* zero extension */ - .endm - -@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit) - ENDPROC(native_irq_enable_sysexit) - #endif - -+ .macro pax_enter_kernel_user -+ pax_set_fptr_mask -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_enter_kernel_user -+#endif -+ .endm -+ -+ .macro pax_exit_kernel_user -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ call pax_exit_kernel_user -+#endif -+#ifdef CONFIG_PAX_RANDKSTACK -+ pushq %rax -+ pushq %r11 -+ call pax_randomize_kstack -+ popq %r11 -+ popq %rax -+#endif -+ .endm -+ -+ .macro pax_erase_kstack -+#ifdef CONFIG_PAX_MEMORY_STACKLEAK -+ call pax_erase_kstack -+#endif -+ .endm -+ - /* - * 32bit SYSENTER instruction entry. - * -@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target) - CFI_REGISTER rsp,rbp - SWAPGS_UNSAFE_STACK - movq PER_CPU_VAR(kernel_stack), %rsp -- addq $(KERNEL_STACK_OFFSET),%rsp -- /* -- * No need to follow this irqs on/off section: the syscall -- * disabled irqs, here we enable it straight after entry: -- */ -- ENABLE_INTERRUPTS(CLBR_NONE) - movl %ebp,%ebp /* zero extension */ - pushq_cfi $__USER32_DS - /*CFI_REL_OFFSET ss,0*/ -@@ -135,23 +157,46 @@ ENTRY(ia32_sysenter_target) - CFI_REL_OFFSET rsp,0 - pushfq_cfi - /*CFI_REL_OFFSET rflags,0*/ -- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d -- CFI_REGISTER rip,r10 -+ orl $X86_EFLAGS_IF,(%rsp) -+ GET_THREAD_INFO(%r11) -+ movl TI_sysenter_return(%r11), %r11d -+ CFI_REGISTER rip,r11 - pushq_cfi $__USER32_CS - /*CFI_REL_OFFSET cs,0*/ - movl %eax, %eax -- pushq_cfi %r10 -+ pushq_cfi %r11 - CFI_REL_OFFSET rip,0 - pushq_cfi %rax - cld - SAVE_ARGS 0,1,0 -+ pax_enter_kernel_user -+ -+#ifdef CONFIG_PAX_RANDKSTACK -+ pax_erase_kstack -+#endif -+ -+ /* -+ * No need to follow this irqs on/off section: the syscall -+ * disabled irqs, here we enable it straight after entry: -+ */ -+ ENABLE_INTERRUPTS(CLBR_NONE) - /* no need to do an access_ok check here because rbp has been - 32bit zero extended */ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ addq pax_user_shadow_base,%rbp -+ ASM_PAX_OPEN_USERLAND -+#endif -+ - ASM_STAC - 1: movl (%rbp),%ebp - _ASM_EXTABLE(1b,ia32_badarg) - ASM_CLAC - -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ ASM_PAX_CLOSE_USERLAND -+#endif -+ - /* - * Sysenter doesn't filter flags, so we need to clear NT - * ourselves. 
To save a few cycles, we can check whether -@@ -161,8 +206,9 @@ ENTRY(ia32_sysenter_target) - jnz sysenter_fix_flags - sysenter_flags_fixed: - -- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - CFI_REMEMBER_STATE - jnz sysenter_tracesys - cmpq $(IA32_NR_syscalls-1),%rax -@@ -172,15 +218,18 @@ sysenter_do_call: - sysenter_dispatch: - call *ia32_sys_call_table(,%rax,8) - movq %rax,RAX-ARGOFFSET(%rsp) -+ GET_THREAD_INFO(%r11) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) - jnz sysexit_audit - sysexit_from_sys_call: -- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ pax_exit_kernel_user -+ pax_erase_kstack -+ andl $~TS_COMPAT,TI_status(%r11) - /* clear IF, that popfq doesn't enable interrupts early */ -- andl $~0x200,EFLAGS-R11(%rsp) -- movl RIP-R11(%rsp),%edx /* User %eip */ -+ andl $~X86_EFLAGS_IF,EFLAGS(%rsp) -+ movl RIP(%rsp),%edx /* User %eip */ - CFI_REGISTER rip,rdx - RESTORE_ARGS 0,24,0,0,0,0 - xorq %r8,%r8 -@@ -205,6 +254,9 @@ sysexit_from_sys_call: - movl %eax,%esi /* 2nd arg: syscall number */ - movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ - call __audit_syscall_entry -+ -+ pax_erase_kstack -+ - movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ - cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys -@@ -216,7 +268,7 @@ sysexit_from_sys_call: - .endm - - .macro auditsys_exit exit -- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jnz ia32_ret_from_sys_call - TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) -@@ -227,11 +279,12 @@ sysexit_from_sys_call: - 1: setbe %al /* 1 if error, 0 if not */ - movzbl %al,%edi /* zero-extend that into %edi */ - call __audit_syscall_exit -+ GET_THREAD_INFO(%r11) - movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ - movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl %edi,TI_flags(%r11) - jz \exit - CLEAR_RREGS -ARGOFFSET - jmp int_with_check -@@ -253,7 +306,7 @@ sysenter_fix_flags: - - sysenter_tracesys: - #ifdef CONFIG_AUDITSYSCALL -- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jz sysenter_auditsys - #endif - SAVE_REST -@@ -265,6 +318,9 @@ sysenter_tracesys: - RESTORE_REST - cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ -+ -+ pax_erase_kstack -+ - jmp sysenter_do_call - CFI_ENDPROC - ENDPROC(ia32_sysenter_target) -@@ -292,19 +348,25 @@ ENDPROC(ia32_sysenter_target) - ENTRY(ia32_cstar_target) - CFI_STARTPROC32 simple - CFI_SIGNAL_FRAME -- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET -+ CFI_DEF_CFA rsp,0 - CFI_REGISTER rip,rcx - /*CFI_REGISTER rflags,r11*/ - SWAPGS_UNSAFE_STACK - movl %esp,%r8d - CFI_REGISTER rsp,r8 - movq PER_CPU_VAR(kernel_stack),%rsp -+ SAVE_ARGS 8*6,0,0 -+ pax_enter_kernel_user -+ -+#ifdef CONFIG_PAX_RANDKSTACK -+ pax_erase_kstack -+#endif -+ - /* - * No need to follow this irqs on/off section: the syscall - * disabled irqs and here we enable it straight after entry: - */ - ENABLE_INTERRUPTS(CLBR_NONE) 
-- SAVE_ARGS 8,0,0 - movl %eax,%eax /* zero extension */ - movq %rax,ORIG_RAX-ARGOFFSET(%rsp) - movq %rcx,RIP-ARGOFFSET(%rsp) -@@ -320,12 +382,25 @@ ENTRY(ia32_cstar_target) - /* no need to do an access_ok check here because r8 has been - 32bit zero extended */ - /* hardware stack frame is complete now */ -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ ASM_PAX_OPEN_USERLAND -+ movq pax_user_shadow_base,%r8 -+ addq RSP-ARGOFFSET(%rsp),%r8 -+#endif -+ - ASM_STAC - 1: movl (%r8),%r9d - _ASM_EXTABLE(1b,ia32_badarg) - ASM_CLAC -- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ ASM_PAX_CLOSE_USERLAND -+#endif -+ -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - CFI_REMEMBER_STATE - jnz cstar_tracesys - cmpq $IA32_NR_syscalls-1,%rax -@@ -335,13 +410,16 @@ cstar_do_call: - cstar_dispatch: - call *ia32_sys_call_table(,%rax,8) - movq %rax,RAX-ARGOFFSET(%rsp) -+ GET_THREAD_INFO(%r11) - DISABLE_INTERRUPTS(CLBR_NONE) - TRACE_IRQS_OFF -- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11) - jnz sysretl_audit - sysretl_from_sys_call: -- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) -- RESTORE_ARGS 0,-ARG_SKIP,0,0,0 -+ pax_exit_kernel_user -+ pax_erase_kstack -+ andl $~TS_COMPAT,TI_status(%r11) -+ RESTORE_ARGS 0,-ORIG_RAX,0,0,0 - movl RIP-ARGOFFSET(%rsp),%ecx - CFI_REGISTER rip,rcx - movl EFLAGS-ARGOFFSET(%rsp),%r11d -@@ -368,7 +446,7 @@ sysretl_audit: - - cstar_tracesys: - #ifdef CONFIG_AUDITSYSCALL -- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11) - jz cstar_auditsys - #endif - xchgl %r9d,%ebp -@@ -382,11 +460,19 @@ cstar_tracesys: - xchgl %ebp,%r9d - cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ -+ -+ pax_erase_kstack -+ - jmp cstar_do_call - END(ia32_cstar_target) - - ia32_badarg: - ASM_CLAC -+ -+#ifdef CONFIG_PAX_MEMORY_UDEREF -+ ASM_PAX_CLOSE_USERLAND -+#endif -+ - movq $-EFAULT,%rax - jmp ia32_sysret - CFI_ENDPROC -@@ -423,19 +509,26 @@ ENTRY(ia32_syscall) - CFI_REL_OFFSET rip,RIP-RIP - PARAVIRT_ADJUST_EXCEPTION_FRAME - SWAPGS -- /* -- * No need to follow this irqs on/off section: the syscall -- * disabled irqs and here we enable it straight after entry: -- */ -- ENABLE_INTERRUPTS(CLBR_NONE) - movl %eax,%eax - pushq_cfi %rax - cld - /* note the registers are not zero extended to the sf. - this could be a problem. 
*/ - SAVE_ARGS 0,1,0 -- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) -- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) -+ pax_enter_kernel_user -+ -+#ifdef CONFIG_PAX_RANDKSTACK -+ pax_erase_kstack -+#endif -+ -+ /* -+ * No need to follow this irqs on/off section: the syscall -+ * disabled irqs and here we enable it straight after entry: -+ */ -+ ENABLE_INTERRUPTS(CLBR_NONE) -+ GET_THREAD_INFO(%r11) -+ orl $TS_COMPAT,TI_status(%r11) -+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11) - jnz ia32_tracesys - cmpq $(IA32_NR_syscalls-1),%rax - ja ia32_badsys -@@ -458,6 +551,9 @@ ia32_tracesys: - RESTORE_REST - cmpq $(IA32_NR_syscalls-1),%rax - ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ -+ -+ pax_erase_kstack -+ - jmp ia32_do_call - END(ia32_syscall) - -diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c -index 8e0ceec..af13504 100644 ---- a/arch/x86/ia32/sys_ia32.c -+++ b/arch/x86/ia32/sys_ia32.c -@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low, - */ - static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat) - { -- typeof(ubuf->st_uid) uid = 0; -- typeof(ubuf->st_gid) gid = 0; -+ typeof(((struct stat64 *)0)->st_uid) uid = 0; -+ typeof(((struct stat64 *)0)->st_gid) gid = 0; - SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid)); - SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid)); - if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) || -diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h -index 372231c..51b537d 100644 ---- a/arch/x86/include/asm/alternative-asm.h -+++ b/arch/x86/include/asm/alternative-asm.h -@@ -18,6 +18,45 @@ - .endm - #endif - -+#ifdef KERNEXEC_PLUGIN -+ .macro pax_force_retaddr_bts rip=0 -+ btsq $63,\rip(%rsp) -+ .endm -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS -+ .macro pax_force_retaddr rip=0, reload=0 -+ btsq $63,\rip(%rsp) -+ .endm -+ .macro pax_force_fptr ptr -+ btsq $63,\ptr -+ .endm -+ .macro pax_set_fptr_mask -+ .endm -+#endif -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ .macro pax_force_retaddr rip=0, reload=0 -+ .if \reload -+ pax_set_fptr_mask -+ .endif -+ orq %r12,\rip(%rsp) -+ .endm -+ .macro pax_force_fptr ptr -+ orq %r12,\ptr -+ .endm -+ .macro pax_set_fptr_mask -+ movabs $0x8000000000000000,%r12 -+ .endm -+#endif -+#else -+ .macro pax_force_retaddr rip=0, reload=0 -+ .endm -+ .macro pax_force_fptr ptr -+ .endm -+ .macro pax_force_retaddr_bts rip=0 -+ .endm -+ .macro pax_set_fptr_mask -+ .endm -+#endif -+ - .macro altinstruction_entry orig alt feature orig_len alt_len - .long \orig - . - .long \alt - . 
-diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h -index 0a3f9c9..c9d081d 100644 ---- a/arch/x86/include/asm/alternative.h -+++ b/arch/x86/include/asm/alternative.h -@@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end) - ".pushsection .discard,\"aw\",@progbits\n" \ - DISCARD_ENTRY(1) \ - ".popsection\n" \ -- ".pushsection .altinstr_replacement, \"ax\"\n" \ -+ ".pushsection .altinstr_replacement, \"a\"\n" \ - ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".popsection" - -@@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end) - DISCARD_ENTRY(1) \ - DISCARD_ENTRY(2) \ - ".popsection\n" \ -- ".pushsection .altinstr_replacement, \"ax\"\n" \ -+ ".pushsection .altinstr_replacement, \"a\"\n" \ - ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ - ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".popsection" -diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h -index 1d2091a..f5074c1 100644 ---- a/arch/x86/include/asm/apic.h -+++ b/arch/x86/include/asm/apic.h -@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void) - - #ifdef CONFIG_X86_LOCAL_APIC - --extern unsigned int apic_verbosity; -+extern int apic_verbosity; - extern int local_apic_timer_c2_ok; - - extern int disable_apic; -diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h -index 20370c6..a2eb9b0 100644 ---- a/arch/x86/include/asm/apm.h -+++ b/arch/x86/include/asm/apm.h -@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in, - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" -- "lcall *%%cs:apm_bios_entry\n\t" -+ "lcall *%%ss:apm_bios_entry\n\t" - "setc %%al\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" -@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in, - __asm__ __volatile__(APM_DO_ZERO_SEGS - "pushl %%edi\n\t" - "pushl %%ebp\n\t" -- "lcall *%%cs:apm_bios_entry\n\t" -+ "lcall *%%ss:apm_bios_entry\n\t" - "setc %%bl\n\t" - "popl %%ebp\n\t" - "popl %%edi\n\t" -diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h -index b17f4f4..7a16182 100644 ---- a/arch/x86/include/asm/atomic.h -+++ b/arch/x86/include/asm/atomic.h -@@ -23,7 +23,18 @@ - */ - static inline int atomic_read(const atomic_t *v) - { -- return (*(volatile int *)&(v)->counter); -+ return (*(volatile const int *)&(v)->counter); -+} -+ -+/** -+ * atomic_read_unchecked - read atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically reads the value of @v. -+ */ -+static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v) -+{ -+ return (*(volatile const int *)&(v)->counter); - } - - /** -@@ -39,6 +50,18 @@ static inline void atomic_set(atomic_t *v, int i) - } - - /** -+ * atomic_set_unchecked - set atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * @i: required value -+ * -+ * Atomically sets the value of @v to @i. 
-+ */ -+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) -+{ -+ v->counter = i; -+} -+ -+/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t -@@ -47,7 +70,29 @@ static inline void atomic_set(atomic_t *v, int i) - */ - static inline void atomic_add(int i, atomic_t *v) - { -- asm volatile(LOCK_PREFIX "addl %1,%0" -+ asm volatile(LOCK_PREFIX "addl %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subl %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter) -+ : "ir" (i)); -+} -+ -+/** -+ * atomic_add_unchecked - add integer to atomic variable -+ * @i: integer value to add -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically adds @i to @v. -+ */ -+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "addl %1,%0\n" - : "+m" (v->counter) - : "ir" (i)); - } -@@ -61,7 +106,29 @@ static inline void atomic_add(int i, atomic_t *v) - */ - static inline void atomic_sub(int i, atomic_t *v) - { -- asm volatile(LOCK_PREFIX "subl %1,%0" -+ asm volatile(LOCK_PREFIX "subl %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addl %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter) -+ : "ir" (i)); -+} -+ -+/** -+ * atomic_sub_unchecked - subtract integer from atomic variable -+ * @i: integer value to subtract -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically subtracts @i from @v. -+ */ -+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "subl %1,%0\n" - : "+m" (v->counter) - : "ir" (i)); - } -@@ -77,7 +144,7 @@ static inline void atomic_sub(int i, atomic_t *v) - */ - static inline int atomic_sub_and_test(int i, atomic_t *v) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); -+ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e"); - } - - /** -@@ -88,7 +155,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) - */ - static inline void atomic_inc(atomic_t *v) - { -- asm volatile(LOCK_PREFIX "incl %0" -+ asm volatile(LOCK_PREFIX "incl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter)); -+} -+ -+/** -+ * atomic_inc_unchecked - increment atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically increments @v by 1. -+ */ -+static inline void atomic_inc_unchecked(atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "incl %0\n" - : "+m" (v->counter)); - } - -@@ -100,7 +187,27 @@ static inline void atomic_inc(atomic_t *v) - */ - static inline void atomic_dec(atomic_t *v) - { -- asm volatile(LOCK_PREFIX "decl %0" -+ asm volatile(LOCK_PREFIX "decl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "+m" (v->counter)); -+} -+ -+/** -+ * atomic_dec_unchecked - decrement atomic variable -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically decrements @v by 1. 
-+ */ -+static inline void atomic_dec_unchecked(atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "decl %0\n" - : "+m" (v->counter)); - } - -@@ -114,7 +221,7 @@ static inline void atomic_dec(atomic_t *v) - */ - static inline int atomic_dec_and_test(atomic_t *v) - { -- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); -+ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e"); - } - - /** -@@ -127,7 +234,20 @@ static inline int atomic_dec_and_test(atomic_t *v) - */ - static inline int atomic_inc_and_test(atomic_t *v) - { -- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e"); -+ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e"); -+} -+ -+/** -+ * atomic_inc_and_test_unchecked - increment and test -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically increments @v by 1 -+ * and returns true if the result is zero, or false for all -+ * other cases. -+ */ -+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v) -+{ -+ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e"); - } - - /** -@@ -141,7 +261,7 @@ static inline int atomic_inc_and_test(atomic_t *v) - */ - static inline int atomic_add_negative(int i, atomic_t *v) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s"); -+ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s"); - } - - /** -@@ -151,7 +271,19 @@ static inline int atomic_add_negative(int i, atomic_t *v) - * - * Atomically adds @i to @v and returns @i + @v - */ --static inline int atomic_add_return(int i, atomic_t *v) -+static inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v) -+{ -+ return i + xadd_check_overflow(&v->counter, i); -+} -+ -+/** -+ * atomic_add_return_unchecked - add integer and return -+ * @i: integer value to add -+ * @v: pointer of type atomic_unchecked_t -+ * -+ * Atomically adds @i to @v and returns @i + @v -+ */ -+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) - { - return i + xadd(&v->counter, i); - } -@@ -163,15 +295,24 @@ static inline int atomic_add_return(int i, atomic_t *v) - * - * Atomically subtracts @i from @v and returns @v - @i - */ --static inline int atomic_sub_return(int i, atomic_t *v) -+static inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v) - { - return atomic_add_return(-i, v); - } - - #define atomic_inc_return(v) (atomic_add_return(1, v)) -+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) -+{ -+ return atomic_add_return_unchecked(1, v); -+} - #define atomic_dec_return(v) (atomic_sub_return(1, v)) - --static inline int atomic_cmpxchg(atomic_t *v, int old, int new) -+static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} -+ -+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new) - { - return cmpxchg(&v->counter, old, new); - } -@@ -181,6 +322,11 @@ static inline int atomic_xchg(atomic_t *v, int new) - return xchg(&v->counter, new); - } - -+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new) -+{ -+ return xchg(&v->counter, new); -+} -+ - /** - * __atomic_add_unless - add unless the number is already a given value - * @v: pointer of type atomic_t -@@ -190,14 +336,27 @@ static inline int atomic_xchg(atomic_t *v, int new) - * Atomically adds @a to @v, so long as @v was not already @u. - * Returns the old value of @v. 
- */ --static inline int __atomic_add_unless(atomic_t *v, int a, int u) -+static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u) - { -- int c, old; -+ int c, old, new; - c = atomic_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("addl %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "subl %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a)); -+ -+ old = atomic_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; -@@ -206,6 +365,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) - } - - /** -+ * atomic_inc_not_zero_hint - increment if not null -+ * @v: pointer of type atomic_t -+ * @hint: probable value of the atomic before the increment -+ * -+ * This version of atomic_inc_not_zero() gives a hint of probable -+ * value of the atomic. This helps processor to not read the memory -+ * before doing the atomic read/modify/write cycle, lowering -+ * number of bus transactions on some arches. -+ * -+ * Returns: 0 if increment was not done, 1 otherwise. -+ */ -+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint -+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) -+{ -+ int val, c = hint, new; -+ -+ /* sanity test, should be removed by compiler if hint is a constant */ -+ if (!hint) -+ return __atomic_add_unless(v, 1, 0); -+ -+ do { -+ asm volatile("incl %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "decl %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c)); -+ -+ val = atomic_cmpxchg(v, c, new); -+ if (val == c) -+ return 1; -+ c = val; -+ } while (c); -+ -+ return 0; -+} -+ -+/** - * atomic_inc_short - increment of a short integer - * @v: pointer to type int - * -@@ -234,14 +436,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2) - #endif - - /* These are x86-specific, used by some header files */ --#define atomic_clear_mask(mask, addr) \ -- asm volatile(LOCK_PREFIX "andl %0,%1" \ -- : : "r" (~(mask)), "m" (*(addr)) : "memory") -+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) -+{ -+ asm volatile(LOCK_PREFIX "andl %1,%0" -+ : "+m" (v->counter) -+ : "r" (~(mask)) -+ : "memory"); -+} - --#define atomic_set_mask(mask, addr) \ -- asm volatile(LOCK_PREFIX "orl %0,%1" \ -- : : "r" ((unsigned)(mask)), "m" (*(addr)) \ -- : "memory") -+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "andl %1,%0" -+ : "+m" (v->counter) -+ : "r" (~(mask)) -+ : "memory"); -+} -+ -+static inline void atomic_set_mask(unsigned int mask, atomic_t *v) -+{ -+ asm volatile(LOCK_PREFIX "orl %1,%0" -+ : "+m" (v->counter) -+ : "r" (mask) -+ : "memory"); -+} -+ -+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "orl %1,%0" -+ : "+m" (v->counter) -+ : "r" (mask) -+ : "memory"); -+} - - /* Atomic operations are already serializing on x86 */ - #define smp_mb__before_atomic_dec() barrier() -diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h -index b154de7..bf18a5a 100644 ---- a/arch/x86/include/asm/atomic64_32.h -+++ b/arch/x86/include/asm/atomic64_32.h -@@ -12,6 +12,14 @@ typedef struct { - u64 __aligned(8) counter; - } atomic64_t; - -+#ifdef CONFIG_PAX_REFCOUNT -+typedef struct { -+ u64 __aligned(8) counter; -+} 
atomic64_unchecked_t; -+#else -+typedef atomic64_t atomic64_unchecked_t; -+#endif -+ - #define ATOMIC64_INIT(val) { (val) } - - #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...) -@@ -37,21 +45,31 @@ typedef struct { - ATOMIC64_DECL_ONE(sym##_386) - - ATOMIC64_DECL_ONE(add_386); -+ATOMIC64_DECL_ONE(add_unchecked_386); - ATOMIC64_DECL_ONE(sub_386); -+ATOMIC64_DECL_ONE(sub_unchecked_386); - ATOMIC64_DECL_ONE(inc_386); -+ATOMIC64_DECL_ONE(inc_unchecked_386); - ATOMIC64_DECL_ONE(dec_386); -+ATOMIC64_DECL_ONE(dec_unchecked_386); - #endif - - #define alternative_atomic64(f, out, in...) \ - __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in) - - ATOMIC64_DECL(read); -+ATOMIC64_DECL(read_unchecked); - ATOMIC64_DECL(set); -+ATOMIC64_DECL(set_unchecked); - ATOMIC64_DECL(xchg); - ATOMIC64_DECL(add_return); -+ATOMIC64_DECL(add_return_unchecked); - ATOMIC64_DECL(sub_return); -+ATOMIC64_DECL(sub_return_unchecked); - ATOMIC64_DECL(inc_return); -+ATOMIC64_DECL(inc_return_unchecked); - ATOMIC64_DECL(dec_return); -+ATOMIC64_DECL(dec_return_unchecked); - ATOMIC64_DECL(dec_if_positive); - ATOMIC64_DECL(inc_not_zero); - ATOMIC64_DECL(add_unless); -@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n - } - - /** -+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable -+ * @p: pointer to type atomic64_unchecked_t -+ * @o: expected value -+ * @n: new value -+ * -+ * Atomically sets @v to @n if it was equal to @o and returns -+ * the old value. -+ */ -+ -+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n) -+{ -+ return cmpxchg64(&v->counter, o, n); -+} -+ -+/** - * atomic64_xchg - xchg atomic64 variable - * @v: pointer to type atomic64_t - * @n: value to assign -@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i) - } - - /** -+ * atomic64_set_unchecked - set atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * @n: value to assign -+ * -+ * Atomically sets the value of @v to @n. -+ */ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i) -+{ -+ unsigned high = (unsigned)(i >> 32); -+ unsigned low = (unsigned)i; -+ alternative_atomic64(set, /* no output */, -+ "S" (v), "b" (low), "c" (high) -+ : "eax", "edx", "memory"); -+} -+ -+/** - * atomic64_read - read atomic64 variable - * @v: pointer to type atomic64_t - * -@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v) - } - - /** -+ * atomic64_read_unchecked - read atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically reads the value of @v and returns it. 
-+ */ -+static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v) -+{ -+ long long r; -+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); -+ return r; -+ } -+ -+/** - * atomic64_add_return - add and return - * @i: integer value to add - * @v: pointer to type atomic64_t -@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) - return i; - } - -+/** -+ * atomic64_add_return_unchecked - add and return -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v and returns @i + *@v -+ */ -+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ alternative_atomic64(add_return_unchecked, -+ ASM_OUTPUT2("+A" (i), "+c" (v)), -+ ASM_NO_INPUT_CLOBBER("memory")); -+ return i; -+} -+ - /* - * Other variants with different arithmetic operators: - */ -@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v) - return a; - } - -+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) -+{ -+ long long a; -+ alternative_atomic64(inc_return_unchecked, "=&A" (a), -+ "S" (v) : "memory", "ecx"); -+ return a; -+} -+ - static inline long long atomic64_dec_return(atomic64_t *v) - { - long long a; -@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v) - } - - /** -+ * atomic64_add_unchecked - add integer to atomic64 variable -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v. -+ */ -+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v) -+{ -+ __alternative_atomic64(add_unchecked, add_return_unchecked, -+ ASM_OUTPUT2("+A" (i), "+c" (v)), -+ ASM_NO_INPUT_CLOBBER("memory")); -+ return i; -+} -+ -+/** - * atomic64_sub - subtract the atomic64 variable - * @i: integer value to subtract - * @v: pointer to type atomic64_t -diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h -index 46e9052..ae45136 100644 ---- a/arch/x86/include/asm/atomic64_64.h -+++ b/arch/x86/include/asm/atomic64_64.h -@@ -18,7 +18,19 @@ - */ - static inline long atomic64_read(const atomic64_t *v) - { -- return (*(volatile long *)&(v)->counter); -+ return (*(volatile const long *)&(v)->counter); -+} -+ -+/** -+ * atomic64_read_unchecked - read atomic64 variable -+ * @v: pointer of type atomic64_unchecked_t -+ * -+ * Atomically reads the value of @v. -+ * Doesn't imply a read memory barrier. -+ */ -+static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v) -+{ -+ return (*(volatile const long *)&(v)->counter); - } - - /** -@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i) - } - - /** -+ * atomic64_set_unchecked - set atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * @i: required value -+ * -+ * Atomically sets the value of @v to @i. 
-+ */ -+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) -+{ -+ v->counter = i; -+} -+ -+/** - * atomic64_add - add integer to atomic64 variable - * @i: integer value to add - * @v: pointer to type atomic64_t -@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i) - */ - static inline void atomic64_add(long i, atomic64_t *v) - { -+ asm volatile(LOCK_PREFIX "addq %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "subq %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "er" (i), "m" (v->counter)); -+} -+ -+/** -+ * atomic64_add_unchecked - add integer to atomic64 variable -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v. -+ */ -+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) -+{ - asm volatile(LOCK_PREFIX "addq %1,%0" - : "=m" (v->counter) - : "er" (i), "m" (v->counter)); -@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v) - */ - static inline void atomic64_sub(long i, atomic64_t *v) - { -- asm volatile(LOCK_PREFIX "subq %1,%0" -+ asm volatile(LOCK_PREFIX "subq %1,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "addq %1,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "er" (i), "m" (v->counter)); -+} -+ -+/** -+ * atomic64_sub_unchecked - subtract the atomic64 variable -+ * @i: integer value to subtract -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically subtracts @i from @v. -+ */ -+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "subq %1,%0\n" - : "=m" (v->counter) - : "er" (i), "m" (v->counter)); - } -@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) - */ - static inline int atomic64_sub_and_test(long i, atomic64_t *v) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e"); -+ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e"); - } - - /** -@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) - */ - static inline void atomic64_inc(atomic64_t *v) - { -+ asm volatile(LOCK_PREFIX "incq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "decq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "m" (v->counter)); -+} -+ -+/** -+ * atomic64_inc_unchecked - increment atomic64 variable -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically increments @v by 1. -+ */ -+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) -+{ - asm volatile(LOCK_PREFIX "incq %0" - : "=m" (v->counter) - : "m" (v->counter)); -@@ -96,7 +185,28 @@ static inline void atomic64_inc(atomic64_t *v) - */ - static inline void atomic64_dec(atomic64_t *v) - { -- asm volatile(LOCK_PREFIX "decq %0" -+ asm volatile(LOCK_PREFIX "decq %0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ LOCK_PREFIX "incq %0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=m" (v->counter) -+ : "m" (v->counter)); -+} -+ -+/** -+ * atomic64_dec_unchecked - decrement atomic64 variable -+ * @v: pointer to type atomic64_t -+ * -+ * Atomically decrements @v by 1. 
-+ */ -+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) -+{ -+ asm volatile(LOCK_PREFIX "decq %0\n" - : "=m" (v->counter) - : "m" (v->counter)); - } -@@ -111,7 +221,7 @@ static inline void atomic64_dec(atomic64_t *v) - */ - static inline int atomic64_dec_and_test(atomic64_t *v) - { -- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e"); -+ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e"); - } - - /** -@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v) - */ - static inline int atomic64_inc_and_test(atomic64_t *v) - { -- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e"); -+ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e"); - } - - /** -@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v) - */ - static inline int atomic64_add_negative(long i, atomic64_t *v) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s"); -+ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s"); - } - - /** -@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) - */ - static inline long atomic64_add_return(long i, atomic64_t *v) - { -+ return i + xadd_check_overflow(&v->counter, i); -+} -+ -+/** -+ * atomic64_add_return_unchecked - add and return -+ * @i: integer value to add -+ * @v: pointer to type atomic64_unchecked_t -+ * -+ * Atomically adds @i to @v and returns @i + @v -+ */ -+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) -+{ - return i + xadd(&v->counter, i); - } - -@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v) - } - - #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) -+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) -+{ -+ return atomic64_add_return_unchecked(1, v); -+} - #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) - - static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) -@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) - return cmpxchg(&v->counter, old, new); - } - -+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new) -+{ -+ return cmpxchg(&v->counter, old, new); -+} -+ - static inline long atomic64_xchg(atomic64_t *v, long new) - { - return xchg(&v->counter, new); -@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new) - */ - static inline int atomic64_add_unless(atomic64_t *v, long a, long u) - { -- long c, old; -+ long c, old, new; - c = atomic64_read(v); - for (;;) { -- if (unlikely(c == (u))) -+ if (unlikely(c == u)) - break; -- old = atomic64_cmpxchg((v), c, c + (a)); -+ -+ asm volatile("add %2,%0\n" -+ -+#ifdef CONFIG_PAX_REFCOUNT -+ "jno 0f\n" -+ "sub %2,%0\n" -+ "int $4\n0:\n" -+ _ASM_EXTABLE(0b, 0b) -+#endif -+ -+ : "=r" (new) -+ : "0" (c), "ir" (a)); -+ -+ old = atomic64_cmpxchg(v, c, new); - if (likely(old == c)) - break; - c = old; - } -- return c != (u); -+ return c != u; - } - - #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) -diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h -index 69bbb48..32517fe 100644 ---- a/arch/x86/include/asm/barrier.h -+++ b/arch/x86/include/asm/barrier.h -@@ -107,7 +107,7 @@ - do { \ - compiletime_assert_atomic_type(*p); \ - smp_mb(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -@@ -124,7 
+124,7 @@ do { \ - do { \ - compiletime_assert_atomic_type(*p); \ - barrier(); \ -- ACCESS_ONCE(*p) = (v); \ -+ ACCESS_ONCE_RW(*p) = (v); \ - } while (0) - - #define smp_load_acquire(p) \ -diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h -index 9fc1af7..98cab0b 100644 ---- a/arch/x86/include/asm/bitops.h -+++ b/arch/x86/include/asm/bitops.h -@@ -49,7 +49,7 @@ - * a mask operation on a byte. - */ - #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) --#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3)) -+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3)) - #define CONST_MASK(nr) (1 << ((nr) & 7)) - - /** -@@ -205,7 +205,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) - */ - static inline int test_and_set_bit(long nr, volatile unsigned long *addr) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); -+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c"); - } - - /** -@@ -251,7 +251,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) - */ - static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); -+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c"); - } - - /** -@@ -304,7 +304,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) - */ - static inline int test_and_change_bit(long nr, volatile unsigned long *addr) - { -- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); -+ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c"); - } - - static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) -@@ -345,7 +345,7 @@ static int test_bit(int nr, const volatile unsigned long *addr); - * - * Undefined if no bit exists, so code should check against 0 first. - */ --static inline unsigned long __ffs(unsigned long word) -+static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word) - { - asm("rep; bsf %1,%0" - : "=r" (word) -@@ -359,7 +359,7 @@ static inline unsigned long __ffs(unsigned long word) - * - * Undefined if no zero exists, so code should check against ~0UL first. - */ --static inline unsigned long ffz(unsigned long word) -+static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word) - { - asm("rep; bsf %1,%0" - : "=r" (word) -@@ -373,7 +373,7 @@ static inline unsigned long ffz(unsigned long word) - * - * Undefined if no set bit exists, so code should check against 0 first. - */ --static inline unsigned long __fls(unsigned long word) -+static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word) - { - asm("bsr %1,%0" - : "=r" (word) -@@ -436,7 +436,7 @@ static inline int ffs(int x) - * set bit if value is nonzero. The last (most significant) bit is - * at position 32. - */ --static inline int fls(int x) -+static inline int __intentional_overflow(-1) fls(int x) - { - int r; - -@@ -478,7 +478,7 @@ static inline int fls(int x) - * at position 64. 
- */ - #ifdef CONFIG_X86_64 --static __always_inline int fls64(__u64 x) -+static __always_inline __intentional_overflow(-1) int fls64(__u64 x) - { - int bitpos = -1; - /* -@@ -499,8 +499,6 @@ static __always_inline int fls64(__u64 x) - - #include <asm-generic/bitops/sched.h> - --#define ARCH_HAS_FAST_MULTIPLIER 1 -- - #include <asm/arch_hweight.h> - - #include <asm-generic/bitops/const_hweight.h> -diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h -index 4fa687a..60f2d39 100644 ---- a/arch/x86/include/asm/boot.h -+++ b/arch/x86/include/asm/boot.h -@@ -6,10 +6,15 @@ - #include <uapi/asm/boot.h> - - /* Physical address where kernel should be loaded. */ --#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ -+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ - + (CONFIG_PHYSICAL_ALIGN - 1)) \ - & ~(CONFIG_PHYSICAL_ALIGN - 1)) - -+#ifndef __ASSEMBLY__ -+extern unsigned char __LOAD_PHYSICAL_ADDR[]; -+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) -+#endif -+ - /* Minimum kernel alignment, as a power of two */ - #ifdef CONFIG_X86_64 - #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT -diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h -index 48f99f1..d78ebf9 100644 ---- a/arch/x86/include/asm/cache.h -+++ b/arch/x86/include/asm/cache.h -@@ -5,12 +5,13 @@ - - /* L1 cache line size */ - #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) --#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) -+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) - - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) -+#define __read_only __attribute__((__section__(".data..read_only"))) - - #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT --#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) -+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT) - - #ifdef CONFIG_X86_VSMP - #ifdef CONFIG_SMP -diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h -index 9863ee3..4a1f8e1 100644 ---- a/arch/x86/include/asm/cacheflush.h -+++ b/arch/x86/include/asm/cacheflush.h -@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg) - unsigned long pg_flags = pg->flags & _PGMT_MASK; - - if (pg_flags == _PGMT_DEFAULT) -- return -1; -+ return ~0UL; - else if (pg_flags == _PGMT_WC) - return _PAGE_CACHE_WC; - else if (pg_flags == _PGMT_UC_MINUS) -diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h -index cb4c73b..c473c29 100644 ---- a/arch/x86/include/asm/calling.h -+++ b/arch/x86/include/asm/calling.h -@@ -82,103 +82,113 @@ For 32-bit we have the following conventions - kernel is built with - #define RSP 152 - #define SS 160 - --#define ARGOFFSET R11 --#define SWFRAME ORIG_RAX -+#define ARGOFFSET R15 - - .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1 -- subq $9*8+\addskip, %rsp -- CFI_ADJUST_CFA_OFFSET 9*8+\addskip -- movq_cfi rdi, 8*8 -- movq_cfi rsi, 7*8 -- movq_cfi rdx, 6*8 -+ subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp -+ CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip -+ movq_cfi rdi, RDI -+ movq_cfi rsi, RSI -+ movq_cfi rdx, RDX - - .if \save_rcx -- movq_cfi rcx, 5*8 -+ movq_cfi rcx, RCX - .endif - -- movq_cfi rax, 4*8 -+ movq_cfi rax, RAX - - .if \save_r891011 -- movq_cfi r8, 3*8 -- movq_cfi r9, 2*8 -- movq_cfi r10, 1*8 -- movq_cfi r11, 0*8 -+ movq_cfi r8, R8 -+ movq_cfi r9, R9 -+ movq_cfi r10, R10 -+ movq_cfi r11, R11 - .endif - -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ movq_cfi r12, R12 -+#endif -+ - .endm - --#define ARG_SKIP (9*8) 
-+#define ARG_SKIP ORIG_RAX - - .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \ - rstor_r8910=1, rstor_rdx=1 -+ -+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ movq_cfi_restore R12, r12 -+#endif -+ - .if \rstor_r11 -- movq_cfi_restore 0*8, r11 -+ movq_cfi_restore R11, r11 - .endif - - .if \rstor_r8910 -- movq_cfi_restore 1*8, r10 -- movq_cfi_restore 2*8, r9 -- movq_cfi_restore 3*8, r8 -+ movq_cfi_restore R10, r10 -+ movq_cfi_restore R9, r9 -+ movq_cfi_restore R8, r8 - .endif - - .if \rstor_rax -- movq_cfi_restore 4*8, rax -+ movq_cfi_restore RAX, rax - .endif - - .if \rstor_rcx -- movq_cfi_restore 5*8, rcx -+ movq_cfi_restore RCX, rcx - .endif - - .if \rstor_rdx -- movq_cfi_restore 6*8, rdx -+ movq_cfi_restore RDX, rdx - .endif - -- movq_cfi_restore 7*8, rsi -- movq_cfi_restore 8*8, rdi -+ movq_cfi_restore RSI, rsi -+ movq_cfi_restore RDI, rdi - -- .if ARG_SKIP+\addskip > 0 -- addq $ARG_SKIP+\addskip, %rsp -- CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) -+ .if ORIG_RAX+\addskip > 0 -+ addq $ORIG_RAX+\addskip, %rsp -+ CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip) - .endif - .endm - -- .macro LOAD_ARGS offset, skiprax=0 -- movq \offset(%rsp), %r11 -- movq \offset+8(%rsp), %r10 -- movq \offset+16(%rsp), %r9 -- movq \offset+24(%rsp), %r8 -- movq \offset+40(%rsp), %rcx -- movq \offset+48(%rsp), %rdx -- movq \offset+56(%rsp), %rsi -- movq \offset+64(%rsp), %rdi -+ .macro LOAD_ARGS skiprax=0 -+ movq R11(%rsp), %r11 -+ movq R10(%rsp), %r10 -+ movq R9(%rsp), %r9 -+ movq R8(%rsp), %r8 -+ movq RCX(%rsp), %rcx -+ movq RDX(%rsp), %rdx -+ movq RSI(%rsp), %rsi -+ movq RDI(%rsp), %rdi - .if \skiprax - .else -- movq \offset+72(%rsp), %rax -+ movq RAX(%rsp), %rax - .endif - .endm - --#define REST_SKIP (6*8) -- - .macro SAVE_REST -- subq $REST_SKIP, %rsp -- CFI_ADJUST_CFA_OFFSET REST_SKIP -- movq_cfi rbx, 5*8 -- movq_cfi rbp, 4*8 -- movq_cfi r12, 3*8 -- movq_cfi r13, 2*8 -- movq_cfi r14, 1*8 -- movq_cfi r15, 0*8 -+ movq_cfi rbx, RBX -+ movq_cfi rbp, RBP -+ -+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ movq_cfi r12, R12 -+#endif -+ -+ movq_cfi r13, R13 -+ movq_cfi r14, R14 -+ movq_cfi r15, R15 - .endm - - .macro RESTORE_REST -- movq_cfi_restore 0*8, r15 -- movq_cfi_restore 1*8, r14 -- movq_cfi_restore 2*8, r13 -- movq_cfi_restore 3*8, r12 -- movq_cfi_restore 4*8, rbp -- movq_cfi_restore 5*8, rbx -- addq $REST_SKIP, %rsp -- CFI_ADJUST_CFA_OFFSET -(REST_SKIP) -+ movq_cfi_restore R15, r15 -+ movq_cfi_restore R14, r14 -+ movq_cfi_restore R13, r13 -+ -+#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR -+ movq_cfi_restore R12, r12 -+#endif -+ -+ movq_cfi_restore RBP, rbp -+ movq_cfi_restore RBX, rbx - .endm - - .macro SAVE_ALL -diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h -index f50de69..2b0a458 100644 ---- a/arch/x86/include/asm/checksum_32.h -+++ b/arch/x86/include/asm/checksum_32.h -@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, - int len, __wsum sum, - int *src_err_ptr, int *dst_err_ptr); - -+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, -+ int len, __wsum sum, -+ int *src_err_ptr, int *dst_err_ptr); -+ -+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, -+ int len, __wsum sum, -+ int *src_err_ptr, int *dst_err_ptr); -+ - /* - * Note: when you get a NULL pointer exception here this means someone - * passed in an incorrect kernel address to one of these functions. 
-@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src, - - might_sleep(); - stac(); -- ret = csum_partial_copy_generic((__force void *)src, dst, -+ ret = csum_partial_copy_generic_from_user((__force void *)src, dst, - len, sum, err_ptr, NULL); - clac(); - -@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src, - might_sleep(); - if (access_ok(VERIFY_WRITE, dst, len)) { - stac(); -- ret = csum_partial_copy_generic(src, (__force void *)dst, -+ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst, - len, sum, NULL, err_ptr); - clac(); - return ret; -diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h -index d47786a..2d8883e 100644 ---- a/arch/x86/include/asm/cmpxchg.h -+++ b/arch/x86/include/asm/cmpxchg.h -@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void) - __compiletime_error("Bad argument size for cmpxchg"); - extern void __xadd_wrong_size(void) - __compiletime_error("Bad argument size for xadd"); -+extern void __xadd_check_overflow_wrong_size(void) -+ __compiletime_error("Bad argument size for xadd_check_overflow"); - extern void __add_wrong_size(void) - __compiletime_error("Bad argument size for add"); -+extern void __add_check_overflow_wrong_size(void) -+ __compiletime_error("Bad argument size for add_check_overflow"); - - /* - * Constants for operation sizes. On 32-bit, the 64-bit size it set to -@@ -67,6 +71,38 @@ extern void __add_wrong_size(void) - __ret; \ - }) - -+#ifdef CONFIG_PAX_REFCOUNT -+#define __xchg_op_check_overflow(ptr, arg, op, lock) \ -+ ({ \ -+ __typeof__ (*(ptr)) __ret = (arg); \ -+ switch (sizeof(*(ptr))) { \ -+ case __X86_CASE_L: \ -+ asm volatile (lock #op "l %0, %1\n" \ -+ "jno 0f\n" \ -+ "mov %0,%1\n" \ -+ "int $4\n0:\n" \ -+ _ASM_EXTABLE(0b, 0b) \ -+ : "+r" (__ret), "+m" (*(ptr)) \ -+ : : "memory", "cc"); \ -+ break; \ -+ case __X86_CASE_Q: \ -+ asm volatile (lock #op "q %q0, %1\n" \ -+ "jno 0f\n" \ -+ "mov %0,%1\n" \ -+ "int $4\n0:\n" \ -+ _ASM_EXTABLE(0b, 0b) \ -+ : "+r" (__ret), "+m" (*(ptr)) \ -+ : : "memory", "cc"); \ -+ break; \ -+ default: \ -+ __ ## op ## _check_overflow_wrong_size(); \ -+ } \ -+ __ret; \ -+ }) -+#else -+#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock) -+#endif -+ - /* - * Note: no "lock" prefix even on SMP: xchg always implies lock anyway. - * Since this is generally used to protect other memory information, we -@@ -167,6 +203,9 @@ extern void __add_wrong_size(void) - #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ") - #define xadd_local(ptr, inc) __xadd((ptr), (inc), "") - -+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock) -+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX) -+ - #define __add(ptr, inc, lock) \ - ({ \ - __typeof__ (*(ptr)) __ret = (inc); \ -diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h -index 59c6c40..5e0b22c 100644 ---- a/arch/x86/include/asm/compat.h -+++ b/arch/x86/include/asm/compat.h -@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64; - typedef u32 compat_uint_t; - typedef u32 compat_ulong_t; - typedef u64 __attribute__((aligned(4))) compat_u64; --typedef u32 compat_uptr_t; -+typedef u32 __user compat_uptr_t; - |