Diffstat (limited to '4.2.4/4420_grsecurity-3.1-4.2.4-201510240907.patch')
-rw-r--r--  4.2.4/4420_grsecurity-3.1-4.2.4-201510240907.patch  172268
1 files changed, 172268 insertions, 0 deletions
diff --git a/4.2.4/4420_grsecurity-3.1-4.2.4-201510240907.patch b/4.2.4/4420_grsecurity-3.1-4.2.4-201510240907.patch
new file mode 100644
index 0000000..c6e64ba
--- /dev/null
+++ b/4.2.4/4420_grsecurity-3.1-4.2.4-201510240907.patch
@@ -0,0 +1,172268 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index 9de9813..1462492 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -3,9 +3,11 @@
+ *.bc
+ *.bin
+ *.bz2
++*.c.[012]*.*
+ *.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+@@ -15,6 +17,7 @@
+ *.gcov
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -51,14 +54,17 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *.xz
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ \#*#
+ *.9
+-.*
++.[^g]*
++.gen*
+ .*.d
+ .mm
+ 53c700_d.h
+@@ -72,9 +78,11 @@ Image
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
++TRACEEVENT-CFLAGS
+ aconf
+ af_names.h
+ aic7*reg.h*
+@@ -83,6 +91,7 @@ aic7*seq.h*
+ aicasm
+ aicdb.h*
+ altivec*.c
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -95,32 +104,40 @@ bounds.h
+ bsetup
+ btfixupprep
+ build
++builtin-policy.h
+ bvmlinux
+ bzImage*
+ capability_names.h
+ capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+ config
+ config-*
+ config_data.h*
++config.c
+ config.mak
+ config.mak.autogen
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+ crc32table.h*
+ cscope.*
+ defkeymap.c
++devicetable-offsets.h
+ devlist.h*
+ dnotify_test
+ docproc
+ dslm
++dtc-lexer.lex.c
+ elf2ecoff
+ elfconfig.h*
+ evergreen_reg_safe.h
++exception_policy.conf
+ fixdep
+ flask.h
+ fore200e_mkfirm
+@@ -128,12 +145,15 @@ fore200e_pca_fw.c*
+ gconf
+ gconf.glade.h
+ gen-devlist
++gen-kdb_cmds.c
+ gen_crc32table
+ gen_init_cpio
+ generated
+ genheaders
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ hpet_example
+ hugepage-mmap
+ hugepage-shm
+@@ -148,14 +168,14 @@ int32.c
+ int4.c
+ int8.c
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+ kxgettext
+ lex.c
+ lex.*.c
+-linux
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -165,14 +185,15 @@ mach-types.h
+ machtypes.h
+ map
+ map_hugetlb
+-media
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
+ mkregtable
+ mktables
+@@ -188,6 +209,8 @@ oui.c*
+ page-types
+ parse.c
+ parse.h
++parse-events*
++pasyms.h
+ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+@@ -197,6 +220,7 @@ perf-archive
+ piggyback
+ piggy.gzip
+ piggy.S
++pmu-*
+ pnmtologo
+ ppc_defs.h*
+ pss_boot.h
+@@ -206,7 +230,12 @@ r200_reg_safe.h
+ r300_reg_safe.h
+ r420_reg_safe.h
+ r600_reg_safe.h
++randomize_layout_hash.h
++randomize_layout_seed.h
++realmode.lds
++realmode.relocs
+ recordmcount
++regdb.c
+ relocs
+ rlim_names.h
+ rn50_reg_safe.h
+@@ -216,8 +245,12 @@ series
+ setup
+ setup.bin
+ setup.elf
++signing_key*
++size_overflow_hash.h
+ sImage
++slabinfo
+ sm_tbl*
++sortextable
+ split-include
+ syscalltab.h
+ tables.c
+@@ -227,6 +260,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -238,13 +272,17 @@ vdso32.lds
+ vdso32.so.dbg
+ vdso64.lds
+ vdso64.so.dbg
++vdsox32.lds
++vdsox32-syms.lds
+ version.h*
+ vmImage
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
+ vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
+ vmlinuz
+ voffset.h
+ vsyscall.lds
+@@ -252,9 +290,12 @@ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
++x509*
+ zImage*
+ zconf.hash.c
++zconf.lex.c
+ zoffset.h
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index 13f888a..250729b 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -23,10 +23,11 @@ This document describes the Linux kernel Makefiles.
+ === 4 Host Program support
+ --- 4.1 Simple Host Program
+ --- 4.2 Composite Host Programs
+- --- 4.3 Using C++ for host programs
+- --- 4.4 Controlling compiler options for host programs
+- --- 4.5 When host programs are actually built
+- --- 4.6 Using hostprogs-$(CONFIG_FOO)
++ --- 4.3 Defining shared libraries
++ --- 4.4 Using C++ for host programs
++ --- 4.5 Controlling compiler options for host programs
++ --- 4.6 When host programs are actually built
++ --- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ === 5 Kbuild clean infrastructure
+
+@@ -643,7 +644,29 @@ Both possibilities are described in the following.
+ Finally, the two .o files are linked to the executable, lxdialog.
+ Note: The syntax <executable>-y is not permitted for host-programs.
+
+---- 4.3 Using C++ for host programs
++--- 4.3 Defining shared libraries
++
++ Objects with extension .so are considered shared libraries, and
++ will be compiled as position independent objects.
++ Kbuild provides support for shared libraries, but the usage
++ shall be restricted.
++ In the following example the libkconfig.so shared library is used
++ to link the executable conf.
++
++ Example:
++ #scripts/kconfig/Makefile
++ hostprogs-y := conf
++ conf-objs := conf.o libkconfig.so
++ libkconfig-objs := expr.o type.o
++
++ Shared libraries always require a corresponding -objs line, and
++ in the example above the shared library libkconfig is composed by
++ the two objects expr.o and type.o.
++ expr.o and type.o will be built as position independent code and
++ linked as a shared library libkconfig.so. C++ is not supported for
++ shared libraries.
++
++--- 4.4 Using C++ for host programs
+
+ kbuild offers support for host programs written in C++. This was
+ introduced solely to support kconfig, and is not recommended
+@@ -666,7 +689,7 @@ Both possibilities are described in the following.
+ qconf-cxxobjs := qconf.o
+ qconf-objs := check.o
+
+---- 4.4 Controlling compiler options for host programs
++--- 4.5 Controlling compiler options for host programs
+
+ When compiling host programs, it is possible to set specific flags.
+ The programs will always be compiled utilising $(HOSTCC) passed
+@@ -694,7 +717,7 @@ Both possibilities are described in the following.
+ When linking qconf, it will be passed the extra option
+ "-L$(QTDIR)/lib".
+
+---- 4.5 When host programs are actually built
++--- 4.6 When host programs are actually built
+
+ Kbuild will only build host-programs when they are referenced
+ as a prerequisite.
+@@ -725,7 +748,7 @@ Both possibilities are described in the following.
+ This will tell kbuild to build lxdialog even if not referenced in
+ any rule.
+
+---- 4.6 Using hostprogs-$(CONFIG_FOO)
++--- 4.7 Using hostprogs-$(CONFIG_FOO)
+
+ A typical pattern in a Kbuild file looks like this:
+
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 1d6f045..2714987 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1244,6 +1244,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
+ Default: 1024
+
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
++ grsec_sysfs_restrict= Format: 0 | 1
++ Default: 1
++ Disables GRKERNSEC_SYSFS_RESTRICT if enabled in config
++
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+ are distributed across NUMA nodes. Defaults on
+ for 64-bit NUMA, off otherwise.
+@@ -2364,6 +2371,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ noexec=on: enable non-executable mappings (default)
+ noexec=off: disable non-executable mappings
+
++ nopcid [X86-64]
++ Disable PCID (Process-Context IDentifier) even if it
++ is supported by the processor.
++
+ nosmap [X86]
+ Disable SMAP (Supervisor Mode Access Prevention)
+ even if it is supported by processor.
+@@ -2662,6 +2673,30 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_sanitize_slab=
++ Format: { 0 | 1 | off | fast | full }
++ Options '0' and '1' are only provided for backward
++ compatibility, 'off' or 'fast' should be used instead.
++ 0|off : disable slab object sanitization
++ 1|fast: enable slab object sanitization excluding
++ whitelisted slabs (default)
++ full : sanitize all slabs, even the whitelisted ones
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
++ pax_extra_latent_entropy
++ Enable a very simple form of latent entropy extraction
++ from the first 4GB of memory as the bootmem allocator
++ passes the memory pages to the buddy allocator.
++
++ pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
++ when the processor supports PCID.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index 6fccb69..60c7c7a 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -41,6 +41,7 @@ show up in /proc/sys/kernel:
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
++- modify_ldt [ X86 only ]
+ - modprobe ==> Documentation/debugging-modules.txt
+ - modules_disabled
+ - msg_next_id [ sysv ipc ]
+@@ -391,6 +392,20 @@ This flag controls the L2 cache of G3 processor boards. If
+
+ ==============================================================
+
++modify_ldt: (X86 only)
++
++Enables (1) or disables (0) the modify_ldt syscall. Modifying the LDT
++(Local Descriptor Table) may be needed to run a 16-bit or segmented code
++such as Dosemu or Wine. This is done via a system call which is not needed
++to run portable applications, and which can sometimes be abused to exploit
++some weaknesses of the architecture, opening new vulnerabilities.
++
++This sysctl allows one to increase the system's security by disabling the
++system call, or to restore compatibility with specific applications when it
++was already disabled.
++
++==============================================================
++
+ modules_disabled:
+
+ A toggle value indicating if modules are allowed to be loaded
+diff --git a/Makefile b/Makefile
+index a952801..9da1dcb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -298,7 +298,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ HOSTCC = gcc
+ HOSTCXX = g++
+ HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -W -Wno-unused-parameter -Wno-missing-field-initializers -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -Wno-array-bounds
+
+ ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
+ HOSTCFLAGS += -Wno-unused-value -Wno-unused-parameter \
+@@ -434,8 +436,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+ $(Q)rm -f .tmp_quiet_recordmcount
+
+@@ -615,6 +617,74 @@ endif
+ # Tell gcc to never replace conditional load with a non-conditional one
+ KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
++else
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
++endif
++ifneq ($(PLUGINCC),)
++ifdef CONFIG_PAX_CONSTIFY_PLUGIN
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_GRKERNSEC_RANDSTRUCT
++RANDSTRUCT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/randomize_layout_plugin.so -DRANDSTRUCT_PLUGIN
++ifdef CONFIG_GRKERNSEC_RANDSTRUCT_PERFORMANCE
++RANDSTRUCT_PLUGIN_CFLAGS += -fplugin-arg-randomize_layout_plugin-performance-mode
++endif
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
++ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
++STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
++endif
++INITIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/initify_plugin.so -DINITIFY_PLUGIN
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(INITIFY_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(RANDSTRUCT_PLUGIN_CFLAGS)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN LATENT_ENTROPY_PLUGIN_CFLAGS
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
++else
++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
++endif
++endif
++
+ ifdef CONFIG_READABLE_ASM
+ # Disable optimizations that make assembler listings hard to read.
+ # reorder blocks reorders the control in the function
+@@ -714,7 +784,7 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
+ else
+ KBUILD_CFLAGS += -g
+ endif
+-KBUILD_AFLAGS += -Wa,-gdwarf-2
++KBUILD_AFLAGS += -Wa,--gdwarf-2
+ endif
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
+@@ -886,7 +956,7 @@ export mod_sign_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -936,6 +1006,8 @@ endif
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -945,7 +1017,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ define filechk_kernel.release
+@@ -988,10 +1060,13 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
+
+ archprepare: archheaders archscripts prepare1 scripts_basic
+
++prepare0: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++prepare0: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=.
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # Generate some files
+@@ -1099,6 +1174,8 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1114,7 +1191,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1180,7 +1257,10 @@ MRPROPER_FILES += .config .config.old .version .old_version \
+ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
+ signing_key.priv signing_key.x509 x509.genkey \
+ extra_certificates signing_key.x509.keyid \
+- signing_key.x509.signer vmlinux-gdb.py
++ signing_key.x509.signer vmlinux-gdb.py \
++ tools/gcc/size_overflow_plugin/size_overflow_hash_aux.h \
++ tools/gcc/size_overflow_plugin/size_overflow_hash.h \
++ tools/gcc/randomize_layout_seed.h
+
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1219,7 +1299,7 @@ distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+- -o -name '.*.rej' -o -name '*%' -o -name 'core' \) \
++ -o -name '.*.rej' -o -name '*.so' -o -name '*%' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+
+@@ -1385,6 +1465,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1525,17 +1607,21 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1547,11 +1633,15 @@ endif
+ $(build)=$(build-dir)
+ # Make sure the latest headers are built for Documentation
+ Documentation/: headers_install
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 8f8eafb..3405f46 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -239,4 +239,14 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ALPHA_ATOMIC_H */
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index ad368a9..fbe0f25 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,19 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #endif
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 968d999..d36b2df 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index aab14a0..b4fa3e7 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index a9a1195..e9b8417 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index 2fd00b7..cfd5069 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index 36dc91a..6769cb0 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1295,10 +1295,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ info.flags = 0;
+ info.length = len;
+@@ -1306,6 +1307,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ info.high_limit = limit;
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -1338,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 4a905bd..0a4da53 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -52,6 +52,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -132,8 +250,29 @@ retry:
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index bd4670d..920c97a 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -485,6 +485,7 @@ config ARC_DBG_TLB_MISS_COUNT
+ bool "Profile TLB Misses"
+ default n
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Counts number of I and D TLB Misses and exports them via Debugfs
+ The counters can be cleared via Debugfs as well
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index ede2526..9e12300 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1770,7 +1770,7 @@ config ALIGNMENT_TRAP
+
+ config UACCESS_WITH_MEMCPY
+ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
+- depends on MMU
++ depends on MMU && !PAX_MEMORY_UDEREF
+ default y if CPU_FEROCEON
+ help
+ Implement faster copy_to_user and clear_user methods for CPU
+@@ -2006,6 +2006,7 @@ config KEXEC
+ bool "Kexec system call (EXPERIMENTAL)"
+ depends on (!SMP || PM_SLEEP_SMP)
+ depends on !CPU_V7M
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index a2e16f9..b26e911 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -7,6 +7,7 @@ config ARM_PTDUMP
+ depends on DEBUG_KERNEL
+ depends on MMU
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index e22c119..abe7041 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -18,17 +18,41 @@
+ #include <asm/barrier.h>
+ #include <asm/cmpxchg.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+
++#ifdef CONFIG_THUMB2_KERNEL
++#define REFCOUNT_TRAP_INSN "bkpt 0xf1"
++#else
++#define REFCOUNT_TRAP_INSN "bkpt 0xf103"
++#endif
++
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
+ /*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+ #define atomic_read(v) ACCESS_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
+ #define atomic_set(v,i) (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -38,26 +62,50 @@
+ * to ensure that the update happens.
+ */
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " bvc 3f\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_POST_RETURN \
++ " bvc 3f\n" \
++" mov %0, %1\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_POST_RETURN
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, c_op, asm_op, post_op, extable) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long tmp; \
+ int result; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic_" #op "\n" \
++ __asm__ __volatile__("@ atomic_" #op #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+ " " #asm_op " %0, %0, %4\n" \
++ post_op \
+ " strex %1, %0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ extable \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, _unchecked, c_op, asm_op, , )\
++ __ATOMIC_OP(op, , c_op, asm_op##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op, post_op, extable) \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
+ { \
+ unsigned long tmp; \
+ int result; \
+@@ -65,12 +113,14 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ smp_mb(); \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic_" #op "_return" #suffix "\n" \
+ "1: ldrex %0, [%3]\n" \
+ " " #asm_op " %0, %0, %4\n" \
++ post_op \
+ " strex %1, %0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ extable \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "Ir" (i) \
+ : "cc"); \
+@@ -80,6 +130,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ return result; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op, , )\
++ __ATOMIC_OP_RETURN(op, , c_op, asm_op##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
++
+ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ {
+ int oldval;
+@@ -115,12 +168,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ __asm__ __volatile__ ("@ atomic_add_unless\n"
+ "1: ldrex %0, [%4]\n"
+ " teq %0, %5\n"
+-" beq 2f\n"
+-" add %1, %0, %6\n"
++" beq 4f\n"
++" adds %1, %0, %6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
++"3:\n"
++#endif
++
+ " strex %2, %1, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -131,14 +196,36 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "strexeq %0, %5, [%3]\n"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ #else /* ARM_ARCH_6 */
+
+ #ifdef CONFIG_SMP
+ #error SMP not supported on pre-ARMv6 CPUs
+ #endif
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static inline void atomic_##op(int i, atomic_t *v) \
++#define __ATOMIC_OP(op, suffix, c_op, asm_op) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t *v) \
+ { \
+ unsigned long flags; \
+ \
+@@ -147,8 +234,11 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ raw_local_irq_restore(flags); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static inline int atomic_##op##_return(int i, atomic_t *v) \
++#define ATOMIC_OP(op, c_op, asm_op) __ATOMIC_OP(op, , c_op, asm_op) \
++ __ATOMIC_OP(op, _unchecked, c_op, asm_op)
++
++#define __ATOMIC_OP_RETURN(op, suffix, c_op, asm_op) \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t *v)\
+ { \
+ unsigned long flags; \
+ int val; \
+@@ -161,6 +251,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ return val; \
+ }
+
++#define ATOMIC_OP_RETURN(op, c_op, asm_op) __ATOMIC_OP_RETURN(op, , c_op, asm_op)\
++ __ATOMIC_OP_RETURN(op, _unchecked, c_op, asm_op)
++
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+ int ret;
+@@ -175,6 +268,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg((atomic_t *)v, old, new);
++}
++
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+@@ -196,16 +294,38 @@ ATOMIC_OPS(sub, -=, sub)
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+@@ -216,6 +336,14 @@ typedef struct {
+ long long counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ long long counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -232,6 +360,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ __asm__ __volatile__("@ atomic64_set\n"
+@@ -240,6 +381,15 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ );
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++" strd %2, %H2, [%1]"
++ : "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ );
++}
+ #else
+ static inline long long atomic64_read(const atomic64_t *v)
+ {
+@@ -254,6 +404,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ return result;
+ }
+
++static inline long long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long result;
++
++ __asm__ __volatile__("@ atomic64_read_unchecked\n"
++" ldrexd %0, %H0, [%1]"
++ : "=&r" (result)
++ : "r" (&v->counter), "Qo" (v->counter)
++ );
++
++ return result;
++}
++
+ static inline void atomic64_set(atomic64_t *v, long long i)
+ {
+ long long tmp;
+@@ -268,29 +431,57 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+ }
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ long long tmp;
++
++ prefetchw(&v->counter);
++ __asm__ __volatile__("@ atomic64_set_unchecked\n"
++"1: ldrexd %0, %H0, [%2]\n"
++" strexd %0, %3, %H3, [%2]\n"
++" teq %0, #0\n"
++" bne 1b"
++ : "=&r" (tmp), "=Qo" (v->counter)
++ : "r" (&v->counter), "r" (i)
++ : "cc");
++}
+ #endif
+
+-#define ATOMIC64_OP(op, op1, op2) \
+-static inline void atomic64_##op(long long i, atomic64_t *v) \
++#undef __OVERFLOW_POST_RETURN
++#define __OVERFLOW_POST_RETURN \
++ " bvc 3f\n" \
++" mov %0, %1\n" \
++" mov %H0, %H1\n" \
++ "2: " REFCOUNT_TRAP_INSN "\n"\
++ "3:\n"
++
++#define __ATOMIC64_OP(op, suffix, op1, op2, post_op, extable) \
++static inline void atomic64_##op##suffix(long long i, atomic64##suffix##_t *v)\
+ { \
+ long long result; \
+ unsigned long tmp; \
+ \
+ prefetchw(&v->counter); \
+- __asm__ __volatile__("@ atomic64_" #op "\n" \
++ __asm__ __volatile__("@ atomic64_" #op #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+ " " #op1 " %Q0, %Q0, %Q4\n" \
+ " " #op2 " %R0, %R0, %R4\n" \
++ post_op \
+ " strexd %1, %0, %H0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ extable \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC64_OP_RETURN(op, op1, op2) \
+-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
++#define ATOMIC64_OP(op, op1, op2) __ATOMIC64_OP(op, _unchecked, op1, op2, , ) \
++ __ATOMIC64_OP(op, , op1, op2##s, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, op1, op2, post_op, extable) \
++static inline long long atomic64_##op##_return##suffix(long long i, atomic64##suffix##_t *v) \
+ { \
+ long long result; \
+ unsigned long tmp; \
+@@ -298,13 +489,15 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+ smp_mb(); \
+ prefetchw(&v->counter); \
+ \
+- __asm__ __volatile__("@ atomic64_" #op "_return\n" \
++ __asm__ __volatile__("@ atomic64_" #op "_return" #suffix "\n" \
+ "1: ldrexd %0, %H0, [%3]\n" \
+ " " #op1 " %Q0, %Q0, %Q4\n" \
+ " " #op2 " %R0, %R0, %R4\n" \
++ post_op \
+ " strexd %1, %0, %H0, [%3]\n" \
+ " teq %1, #0\n" \
+-" bne 1b" \
++" bne 1b\n" \
++ extable \
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
+ : "r" (&v->counter), "r" (i) \
+ : "cc"); \
+@@ -314,6 +507,9 @@ static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+ return result; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, op1, op2) __ATOMIC64_OP_RETURN(op, _unchecked, op1, op2, , ) \
++ __ATOMIC64_OP_RETURN(op, , op1, op2##s, __OVERFLOW_POST_RETURN, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_OPS(op, op1, op2) \
+ ATOMIC64_OP(op, op1, op2) \
+ ATOMIC64_OP_RETURN(op, op1, op2)
+@@ -323,7 +519,12 @@ ATOMIC64_OPS(sub, subs, sbc)
+
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST_RETURN
++#undef __OVERFLOW_POST
+
+ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ long long new)
+@@ -351,6 +552,31 @@ static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+ return oldval;
+ }
+
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, long long old,
++ long long new)
++{
++ long long oldval;
++ unsigned long res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
++ "ldrexd %1, %H1, [%3]\n"
++ "mov %0, #0\n"
++ "teq %1, %4\n"
++ "teqeq %H1, %H4\n"
++ "strexdeq %0, %5, %H5, [%3]"
++ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
++ : "r" (&ptr->counter), "r" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ {
+ long long result;
+@@ -376,21 +602,35 @@ static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ {
+ long long result;
+- unsigned long tmp;
++ u64 tmp;
+
+ smp_mb();
+ prefetchw(&v->counter);
+
+ __asm__ __volatile__("@ atomic64_dec_if_positive\n"
+-"1: ldrexd %0, %H0, [%3]\n"
+-" subs %Q0, %Q0, #1\n"
+-" sbc %R0, %R0, #0\n"
++"1: ldrexd %1, %H1, [%3]\n"
++" subs %Q0, %Q1, #1\n"
++" sbcs %R0, %R1, #0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %Q0, %Q1\n"
++" mov %R0, %R1\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
++"3:\n"
++#endif
++
+ " teq %R0, #0\n"
+-" bmi 2f\n"
++" bmi 4f\n"
+ " strexd %1, %0, %H0, [%3]\n"
+ " teq %1, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter)
+ : "cc");
+@@ -414,13 +654,25 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ " teq %0, %5\n"
+ " teqeq %H0, %H5\n"
+ " moveq %1, #0\n"
+-" beq 2f\n"
++" beq 4f\n"
+ " adds %Q0, %Q0, %Q6\n"
+-" adc %R0, %R0, %R6\n"
++" adcs %R0, %R0, %R6\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: " REFCOUNT_TRAP_INSN "\n"
++"3:\n"
++#endif
++
+ " strexd %2, %0, %H0, [%4]\n"
+ " teq %2, #0\n"
+ " bne 1b\n"
+-"2:"
++"4:\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (u), "r" (a)
+ : "cc");
+@@ -433,10 +685,13 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+ #define atomic64_inc(v) atomic64_add(1LL, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+ #define atomic64_dec(v) atomic64_sub(1LL, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
+ #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
+index 6c2327e..85beac4 100644
+--- a/arch/arm/include/asm/barrier.h
++++ b/arch/arm/include/asm/barrier.h
+@@ -67,7 +67,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 75fe66b..ba3dee4 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+@@ -24,5 +26,6 @@
+ #endif
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__ ((__section__(".data..read_only")))
+
+ #endif
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 4812cda..9da8116 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -116,7 +116,7 @@ struct cpu_cache_fns {
+ void (*dma_unmap_area)(const void *, size_t, int);
+
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
+diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
+index 5233151..87a71fa 100644
+--- a/arch/arm/include/asm/checksum.h
++++ b/arch/arm/include/asm/checksum.h
+@@ -37,7 +37,19 @@ __wsum
+ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+
+ __wsum
+-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
++
++static inline __wsum
++csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
++{
++ __wsum ret;
++ pax_open_userland();
++ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
++ pax_close_userland();
++ return ret;
++}
++
++
+
+ /*
+ * Fold a partial checksum without adding pseudo headers
+diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
+index 1692a05..1835802 100644
+--- a/arch/arm/include/asm/cmpxchg.h
++++ b/arch/arm/include/asm/cmpxchg.h
+@@ -107,6 +107,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
+ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))); \
+ })
++#define xchg_unchecked(ptr, x) ({ \
++ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
++ sizeof(*(ptr))); \
++})
+
+ #include <asm-generic/cmpxchg-local.h>
+
+diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
+index 0f84249..8e83c55 100644
+--- a/arch/arm/include/asm/cpuidle.h
++++ b/arch/arm/include/asm/cpuidle.h
+@@ -32,7 +32,7 @@ struct device_node;
+ struct cpuidle_ops {
+ int (*suspend)(int cpu, unsigned long arg);
+ int (*init)(struct device_node *, int cpu);
+-};
++} __no_const;
+
+ struct of_cpuidle_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
+index 6ddbe44..b5e38b1a 100644
+--- a/arch/arm/include/asm/domain.h
++++ b/arch/arm/include/asm/domain.h
+@@ -48,18 +48,37 @@
+ * Domain types
+ */
+ #define DOMAIN_NOACCESS 0
+-#define DOMAIN_CLIENT 1
+ #ifdef CONFIG_CPU_USE_DOMAINS
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_KERNELCLIENT 1
+ #define DOMAIN_MANAGER 3
++#define DOMAIN_VECTORS DOMAIN_USER
+ #else
++
++#ifdef CONFIG_PAX_KERNEXEC
+ #define DOMAIN_MANAGER 1
++#define DOMAIN_KERNEXEC 3
++#else
++#define DOMAIN_MANAGER 1
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define DOMAIN_USERCLIENT 0
++#define DOMAIN_UDEREF 1
++#define DOMAIN_VECTORS DOMAIN_KERNEL
++#else
++#define DOMAIN_USERCLIENT 1
++#define DOMAIN_VECTORS DOMAIN_USER
++#endif
++#define DOMAIN_KERNELCLIENT 1
++
+ #endif
+
+ #define domain_val(dom,type) ((type) << (2*(dom)))
+
+ #ifndef __ASSEMBLY__
+
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ static inline void set_domain(unsigned val)
+ {
+ asm volatile(
+@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
+ isb();
+ }
+
+-#define modify_domain(dom,type) \
+- do { \
+- struct thread_info *thread = current_thread_info(); \
+- unsigned int domain = thread->cpu_domain; \
+- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
+- thread->cpu_domain = domain | domain_val(dom, type); \
+- set_domain(thread->cpu_domain); \
+- } while (0)
+-
++extern void modify_domain(unsigned int dom, unsigned int type);
+ #else
+ static inline void set_domain(unsigned val) { }
+ static inline void modify_domain(unsigned dom, unsigned type) { }
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ff..f60b47b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -117,7 +117,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
+index de53547..52b9a28 100644
+--- a/arch/arm/include/asm/fncpy.h
++++ b/arch/arm/include/asm/fncpy.h
+@@ -81,7 +81,9 @@
+ BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
+ (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
+ \
++ pax_open_kernel(); \
+ memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
++ pax_close_kernel(); \
+ flush_icache_range((unsigned long)(dest_buf), \
+ (unsigned long)(dest_buf) + (size)); \
+ \
+diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
+index 5eed828..365e018 100644
+--- a/arch/arm/include/asm/futex.h
++++ b/arch/arm/include/asm/futex.h
+@@ -46,6 +46,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ pax_open_userland();
++
+ smp_mb();
+ /* Prefetching cannot fault */
+ prefetchw(uaddr);
+@@ -63,6 +65,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : "cc", "memory");
+ smp_mb();
+
++ pax_close_userland();
++
+ *uval = val;
+ return ret;
+ }
+@@ -94,6 +98,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ return -EFAULT;
+
+ preempt_disable();
++ pax_open_userland();
++
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: " TUSER(ldr) " %1, [%4]\n"
+ " teq %1, %2\n"
+@@ -104,6 +110,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
++ pax_close_userland();
++
+ *uval = val;
+ preempt_enable();
+
+@@ -131,6 +139,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ preempt_disable();
+ #endif
+ pagefault_disable();
++ pax_open_userland();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+@@ -152,6 +161,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+ ret = -ENOSYS;
+ }
+
++ pax_close_userland();
+ pagefault_enable();
+ #ifndef CONFIG_SMP
+ preempt_enable();
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index 83eb2f7..ed77159 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -4,6 +4,6 @@
+ /*
+ * This is the "bare minimum". AIO seems to require this.
+ */
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif
+diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
+index 9e614a1..3302cca 100644
+--- a/arch/arm/include/asm/mach/dma.h
++++ b/arch/arm/include/asm/mach/dma.h
+@@ -22,7 +22,7 @@ struct dma_ops {
+ int (*residue)(unsigned int, dma_t *); /* optional */
+ int (*setspeed)(unsigned int, dma_t *, int); /* optional */
+ const char *type;
+-};
++} __do_const;
+
+ struct dma_struct {
+ void *addr; /* single DMA address */
+diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
+index f98c7f3..e5c626d 100644
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -23,17 +23,19 @@ struct map_desc {
+
+ /* types 0-3 are defined in asm/io.h */
+ enum {
+- MT_UNCACHED = 4,
+- MT_CACHECLEAN,
+- MT_MINICLEAN,
++ MT_UNCACHED_RW = 4,
++ MT_CACHECLEAN_RO,
++ MT_MINICLEAN_RO,
+ MT_LOW_VECTORS,
+ MT_HIGH_VECTORS,
+- MT_MEMORY_RWX,
++ __MT_MEMORY_RWX,
+ MT_MEMORY_RW,
+- MT_ROM,
+- MT_MEMORY_RWX_NONCACHED,
++ MT_MEMORY_RX,
++ MT_ROM_RX,
++ MT_MEMORY_RW_NONCACHED,
++ MT_MEMORY_RX_NONCACHED,
+ MT_MEMORY_RW_DTCM,
+- MT_MEMORY_RWX_ITCM,
++ MT_MEMORY_RX_ITCM,
+ MT_MEMORY_RW_SO,
+ MT_MEMORY_DMA_READY,
+ };
+diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
+index 563b92f..689d58e 100644
+--- a/arch/arm/include/asm/outercache.h
++++ b/arch/arm/include/asm/outercache.h
+@@ -39,7 +39,7 @@ struct outer_cache_fns {
+ /* This is an ARM L2C thing */
+ void (*write_sec)(unsigned long, unsigned);
+ void (*configure)(const struct l2x0_regs *);
+-};
++} __no_const;
+
+ extern struct outer_cache_fns outer_cache;
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 4355f0e..cd9168e 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -23,6 +23,7 @@
+
+ #else
+
++#include <linux/compiler.h>
+ #include <asm/glue.h>
+
+ /*
+@@ -114,7 +115,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
+index 19cfab5..3f5c7e9 100644
+--- a/arch/arm/include/asm/pgalloc.h
++++ b/arch/arm/include/asm/pgalloc.h
+@@ -17,6 +17,7 @@
+ #include <asm/processor.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/system_info.h>
+
+ #define check_pgt_cache() do { } while (0)
+
+@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #else /* !CONFIG_ARM_LPAE */
+
+ /*
+@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, pmd) do { } while (0)
+ #define pud_populate(mm,pmd,pte) BUG()
++#define pud_populate_kernel(mm,pmd,pte) BUG()
+
+ #endif /* CONFIG_ARM_LPAE */
+
+@@ -128,6 +135,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+ __free_page(pte);
+ }
+
++static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
++{
++#ifdef CONFIG_ARM_LPAE
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#else
++ if (addr & SECTION_SIZE)
++ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
++ else
++ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
++#endif
++ flush_pmd_entry(pmdp);
++}
++
+ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+ pmdval_t prot)
+ {
+diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
+index 5e68278..1869bae 100644
+--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
++++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
+@@ -27,7 +27,7 @@
+ /*
+ * - section
+ */
+-#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
++#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
+ #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
+ #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
+ #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+@@ -39,6 +39,7 @@
+ #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
+ #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
+ #define PMD_SECT_AF (_AT(pmdval_t, 0))
++#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
+
+ #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
+ #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
+@@ -68,6 +69,7 @@
+ * - extended small page/tiny page
+ */
+ #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
++#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
+ #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
+ #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
+ #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index aeddd28..207745c 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -127,6 +127,9 @@
+ #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
+ #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
+
++/* Two-level page tables only have PXN in the PGD, not in the PTE. */
++#define L_PTE_PXN (_AT(pteval_t, 0))
++
+ /*
+ * These are the memory types, defined to be compatible with
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index a745a2a..481350a 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -80,6 +80,7 @@
+ #define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
+ #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
+ #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
++#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
+ #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
+ #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+@@ -91,10 +92,12 @@
+ #define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
+ #define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
+ #define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
++#define PMD_SECT_RDONLY PMD_SECT_AP2
+
+ /*
+ * To be used in assembly code with the upper page attributes.
+ */
++#define L_PTE_PXN_HIGH (1 << (53 - 32))
+ #define L_PTE_XN_HIGH (1 << (54 - 32))
+ #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
+
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index f403541..b10df68 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -33,6 +33,9 @@
+ #include <asm/pgtable-2level.h>
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ /*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+@@ -48,6 +51,9 @@
+ #define LIBRARY_TEXT_START 0x0c000000
+
+ #ifndef __ASSEMBLY__
++extern pteval_t __supported_pte_mask;
++extern pmdval_t __supported_pmd_mask;
++
+ extern void __pte_error(const char *file, int line, pte_t);
+ extern void __pmd_error(const char *file, int line, pmd_t);
+ extern void __pgd_error(const char *file, int line, pgd_t);
+@@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
+ #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++#include <asm/domain.h>
++#include <linux/thread_info.h>
++#include <linux/preempt.h>
++
++static inline int test_domain(int domain, int domaintype)
++{
++ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
++}
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ preempt_disable();
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
++ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
++#endif
++ return 0;
++}
++
++static inline unsigned long pax_close_kernel(void) {
++#ifdef CONFIG_ARM_LPAE
++ /* TODO */
++#else
++ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
++ /* DOMAIN_MANAGER = "client" under KERNEXEC */
++ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
++ preempt_enable_no_resched();
++#endif
++ return 0;
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+@@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
+ /*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cachable and bufferable bits based on memory policy,
+- * as well as any architecture dependent bits like global/ASID and SMP
+- * shared mapping bits.
++ * as well as any architecture dependent bits like global/ASID, PXN,
++ * and SMP shared mapping bits.
+ */
+ #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
+
+@@ -307,7 +355,7 @@ static inline pte_t pte_mknexec(pte_t pte)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+- L_PTE_NONE | L_PTE_VALID;
++ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+ }
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index c25ef3e..735f14b 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -32,7 +32,7 @@ struct psci_operations {
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+-};
++} __no_const;
+
+ extern struct psci_operations psci_ops;
+ extern struct smp_operations psci_smp_ops;
+diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
+index 2f3ac1b..67182ae0 100644
+--- a/arch/arm/include/asm/smp.h
++++ b/arch/arm/include/asm/smp.h
+@@ -108,7 +108,7 @@ struct smp_operations {
+ int (*cpu_disable)(unsigned int cpu);
+ #endif
+ #endif
+-};
++} __no_const;
+
+ struct of_cpu_method {
+ const char *method;
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index bd32ede..bd90a0b 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -74,9 +74,9 @@ struct thread_info {
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
++ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
++ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
+ }
+
+ #define init_thread_info (init_thread_union.thread_info)
+@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define TIF_SYSCALL_AUDIT 9
+ #define TIF_SYSCALL_TRACEPOINT 10
+ #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
+-#define TIF_NOHZ 12 /* in adaptive nohz mode */
++/* within 8 bits of TIF_SYSCALL_TRACE
++ * to meet flexible second operand requirements
++ */
++#define TIF_GRSEC_SETXID 12
++#define TIF_NOHZ 13 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_RESTORE_SIGMASK 20
+@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
+
+ /* Checks for any syscall work in entry-common.S */
+ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
+index 5f833f7..76e6644 100644
+--- a/arch/arm/include/asm/tls.h
++++ b/arch/arm/include/asm/tls.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
++#include <asm/pgtable.h>
+
+ #ifdef __ASSEMBLY__
+ #include <asm/asm-offsets.h>
+@@ -89,7 +90,9 @@ static inline void set_tls(unsigned long val)
+ * at 0xffff0fe0 must be used instead. (see
+ * entry-armv.S for details)
+ */
++ pax_open_kernel();
+ *((unsigned int *)0xffff0ff0) = val;
++ pax_close_kernel();
+ #endif
+ }
+
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 74b17d0..7e6da4b 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -18,6 +18,7 @@
+ #include <asm/domain.h>
+ #include <asm/unified.h>
+ #include <asm/compiler.h>
++#include <asm/pgtable.h>
+
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -70,11 +71,38 @@ extern int __put_user_bad(void);
+ static inline void set_fs(mm_segment_t fs)
+ {
+ current_thread_info()->addr_limit = fs;
+- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
++ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
+ }
+
+ #define segment_eq(a, b) ((a) == (b))
+
++#define __HAVE_ARCH_PAX_OPEN_USERLAND
++#define __HAVE_ARCH_PAX_CLOSE_USERLAND
++
++static inline void pax_open_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
++ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
++ }
++#endif
++
++}
++
++static inline void pax_close_userland(void)
++{
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (segment_eq(get_fs(), USER_DS)) {
++ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
++ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
++ }
++#endif
++
++}
++
+ #define __addr_ok(addr) ({ \
+ unsigned long flag; \
+ __asm__("cmp %2, %0; movlo %0, #0" \
+@@ -198,8 +226,12 @@ extern int __get_user_64t_4(void *);
+
+ #define get_user(x, p) \
+ ({ \
++ int __e; \
+ might_fault(); \
+- __get_user_check(x, p); \
++ pax_open_userland(); \
++ __e = __get_user_check((x), (p)); \
++ pax_close_userland(); \
++ __e; \
+ })
+
+ extern int __put_user_1(void *, unsigned int);
+@@ -244,8 +276,12 @@ extern int __put_user_8(void *, unsigned long long);
+
+ #define put_user(x, p) \
+ ({ \
++ int __e; \
+ might_fault(); \
+- __put_user_check(x, p); \
++ pax_open_userland(); \
++ __e = __put_user_check((x), (p)); \
++ pax_close_userland(); \
++ __e; \
+ })
+
+ #else /* CONFIG_MMU */
+@@ -269,6 +305,7 @@ static inline void set_fs(mm_segment_t fs)
+
+ #endif /* CONFIG_MMU */
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
+
+ #define user_addr_max() \
+@@ -286,13 +323,17 @@ static inline void set_fs(mm_segment_t fs)
+ #define __get_user(x, ptr) \
+ ({ \
+ long __gu_err = 0; \
++ pax_open_userland(); \
+ __get_user_err((x), (ptr), __gu_err); \
++ pax_close_userland(); \
+ __gu_err; \
+ })
+
+ #define __get_user_error(x, ptr, err) \
+ ({ \
++ pax_open_userland(); \
+ __get_user_err((x), (ptr), err); \
++ pax_close_userland(); \
+ (void) 0; \
+ })
+
+@@ -368,13 +409,17 @@ do { \
+ #define __put_user(x, ptr) \
+ ({ \
+ long __pu_err = 0; \
++ pax_open_userland(); \
+ __put_user_err((x), (ptr), __pu_err); \
++ pax_close_userland(); \
+ __pu_err; \
+ })
+
+ #define __put_user_error(x, ptr, err) \
+ ({ \
++ pax_open_userland(); \
+ __put_user_err((x), (ptr), err); \
++ pax_close_userland(); \
+ (void) 0; \
+ })
+
+@@ -474,11 +519,44 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
++extern unsigned long __must_check __size_overflow(3) ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check __size_overflow(3) ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __size_overflow(3) __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ unsigned long ret;
++
++ check_object_size(to, n, false);
++ pax_open_userland();
++ ret = ___copy_from_user(to, from, n);
++ pax_close_userland();
++ return ret;
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ unsigned long ret;
++
++ check_object_size(from, n, true);
++ pax_open_userland();
++ ret = ___copy_to_user(to, from, n);
++ pax_close_userland();
++ return ret;
++}
++
++extern unsigned long __must_check __size_overflow(3) __copy_to_user_std(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check __size_overflow(2) ___clear_user(void __user *addr, unsigned long n);
++extern unsigned long __must_check __size_overflow(2) __clear_user_std(void __user *addr, unsigned long n);
++
++static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
++{
++ unsigned long ret;
++ pax_open_userland();
++ ret = ___clear_user(addr, n);
++ pax_close_userland();
++ return ret;
++}
++
+ #else
+ #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
+ #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
+@@ -487,6 +565,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -496,6 +577,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
+index 5af0ed1..cea83883 100644
+--- a/arch/arm/include/uapi/asm/ptrace.h
++++ b/arch/arm/include/uapi/asm/ptrace.h
+@@ -92,7 +92,7 @@
+ * ARMv7 groups of PSR bits
+ */
+ #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
+-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
+ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 5e5a51a..b21eeef 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -58,7 +58,7 @@ EXPORT_SYMBOL(arm_delay_ops);
+
+ /* networking */
+ EXPORT_SYMBOL(csum_partial);
+-EXPORT_SYMBOL(csum_partial_copy_from_user);
++EXPORT_SYMBOL(__csum_partial_copy_from_user);
+ EXPORT_SYMBOL(csum_partial_copy_nocheck);
+ EXPORT_SYMBOL(__csum_ipv6_magic);
+
+@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
+-EXPORT_SYMBOL(__clear_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
++EXPORT_SYMBOL(___clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+ EXPORT_SYMBOL(__get_user_2);
+diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
+index 318da33..373689f 100644
+--- a/arch/arm/kernel/cpuidle.c
++++ b/arch/arm/kernel/cpuidle.c
+@@ -19,7 +19,7 @@ extern struct of_cpuidle_method __cpuidle_method_of_table[];
+ static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel
+ __used __section(__cpuidle_method_of_table_end);
+
+-static struct cpuidle_ops cpuidle_ops[NR_CPUS];
++static struct cpuidle_ops cpuidle_ops[NR_CPUS] __read_only;
+
+ /**
+ * arm_cpuidle_simple_enter() - a wrapper to cpu_do_idle()
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index cb4fb1e..dc7fcaf 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -50,6 +50,87 @@
+ 9997:
+ .endm
+
++ .macro pax_enter_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ make aligned space for saved DACR
++ sub sp, sp, #8
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++ @ store old DACR on stack
++ str r1, [sp, #8]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
++ .endm
++
++ .macro pax_open_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_CLIENT
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_close_userland
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
+ .macro pabt_helper
+ @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+ #ifdef MULTI_PABORT
+@@ -92,11 +173,15 @@
+ * Invalid mode handlers
+ */
+ .macro inv_entry, reason
++
++ pax_enter_kernel
++
+ sub sp, sp, #S_FRAME_SIZE
+ ARM( stmib sp, {r1 - lr} )
+ THUMB( stmia sp, {r0 - r12} )
+ THUMB( str sp, [sp, #S_SP] )
+ THUMB( str lr, [sp, #S_LR] )
++
+ mov r1, #\reason
+ .endm
+
+@@ -152,7 +237,11 @@ ENDPROC(__und_invalid)
+ .macro svc_entry, stack_hole=0, trace=1
+ UNWIND(.fnstart )
+ UNWIND(.save {r0 - pc} )
++
++ pax_enter_kernel
++
+ sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
++
+ #ifdef CONFIG_THUMB2_KERNEL
+ SPFIX( str r0, [sp] ) @ temporarily saved
+ SPFIX( mov r0, sp )
+@@ -167,7 +256,12 @@ ENDPROC(__und_invalid)
+ ldmia r0, {r3 - r5}
+ add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r6, #-1 @ "" "" "" ""
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ offset sp by 8 as done in pax_enter_kernel
++ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
++#else
+ add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
++#endif
+ SPFIX( addeq r2, r2, #4 )
+ str r3, [sp, #-4]! @ save the "real" r0 copied
+ @ from the exception stack
+@@ -371,6 +465,9 @@ ENDPROC(__fiq_abt)
+ .macro usr_entry, trace=1
+ UNWIND(.fnstart )
+ UNWIND(.cantunwind ) @ don't unwind the user space
++
++ pax_enter_kernel_user
++
+ sub sp, sp, #S_FRAME_SIZE
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} )
+@@ -481,7 +578,9 @@ __und_usr:
+ tst r3, #PSR_T_BIT @ Thumb mode?
+ bne __und_usr_thumb
+ sub r4, r2, #4 @ ARM instr at LR - 4
++ pax_open_userland
+ 1: ldrt r0, [r4]
++ pax_close_userland
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
+ @ r0 = 32-bit ARM instruction which caused the exception
+@@ -515,11 +614,15 @@ __und_usr_thumb:
+ */
+ .arch armv6t2
+ #endif
++ pax_open_userland
+ 2: ldrht r5, [r4]
++ pax_close_userland
+ ARM_BE8(rev16 r5, r5) @ little endian instruction
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+ blo __und_usr_fault_16 @ 16bit undefined instruction
++ pax_open_userland
+ 3: ldrht r0, [r2]
++ pax_close_userland
+ ARM_BE8(rev16 r0, r0) @ little endian instruction
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
+@@ -549,7 +652,8 @@ ENDPROC(__und_usr)
+ */
+ .pushsection .text.fixup, "ax"
+ .align 2
+-4: str r4, [sp, #S_PC] @ retry current instruction
++4: pax_close_userland
++ str r4, [sp, #S_PC] @ retry current instruction
+ ret r9
+ .popsection
+ .pushsection __ex_table,"a"
+@@ -769,7 +873,7 @@ ENTRY(__switch_to)
+ THUMB( str lr, [ip], #4 )
+ ldr r4, [r2, #TI_TP_VALUE]
+ ldr r5, [r2, #TI_TP_VALUE + 4]
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ ldr r6, [r2, #TI_CPU_DOMAIN]
+ #endif
+ switch_tls r1, r4, r5, r3, r7
+@@ -778,7 +882,7 @@ ENTRY(__switch_to)
+ ldr r8, =__stack_chk_guard
+ ldr r7, [r7, #TSK_STACK_CANARY]
+ #endif
+-#ifdef CONFIG_CPU_USE_DOMAINS
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+ mcr p15, 0, r6, c3, c0, 0 @ Set domain register
+ #endif
+ mov r5, r0
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index b48dd4f..9f9a72f 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -11,18 +11,46 @@
+ #include <asm/assembler.h>
+ #include <asm/unistd.h>
+ #include <asm/ftrace.h>
++#include <asm/domain.h>
+ #include <asm/unwind.h>
+
++#include "entry-header.S"
++
+ #ifdef CONFIG_NEED_RET_TO_USER
+ #include <mach/entry-macro.S>
+ #else
+ .macro arch_ret_to_user, tmp1, tmp2
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r1, r2}
++ @ read DACR from cpu_domain into r1
++ mov r2, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r2, r2, #(0x1fc0)
++ bic r2, r2, #(0x3f)
++ ldr r1, [r2, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_UDEREF
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r2, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r1, r2}
++#endif
+ .endm
+ #endif
+
+-#include "entry-header.S"
+-
+-
+ .align 5
+ /*
+ * This is the fast syscall return path. We do as little as
+@@ -174,6 +202,12 @@ ENTRY(vector_swi)
+ USER( ldr scno, [lr, #-4] ) @ get SWI instruction
+ #endif
+
++ /*
++ * do this here to avoid a performance hit of wrapping the code above
++ * that directly dereferences userland to parse the SWI instruction
++ */
++ pax_enter_kernel_user
++
+ adr tbl, sys_call_table @ load syscall table pointer
+
+ #if defined(CONFIG_OABI_COMPAT)
+diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
+index 1a0045a..9b4f34d 100644
+--- a/arch/arm/kernel/entry-header.S
++++ b/arch/arm/kernel/entry-header.S
+@@ -196,6 +196,60 @@
+ msr cpsr_c, \rtemp @ switch back to the SVC mode
+ .endm
+
++ .macro pax_enter_kernel_user
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read DACR from cpu_domain into r1
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ ldr r1, [r0, #TI_CPU_DOMAIN]
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ @ set current DOMAIN_USER to DOMAIN_NOACCESS
++ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
++#endif
++#ifdef CONFIG_PAX_KERNEXEC
++ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
++ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
++ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
++#endif
++ @ write r1 to current_thread_info()->cpu_domain
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ @ save regs
++ stmdb sp!, {r0, r1}
++ @ read old DACR from stack into r1
++ ldr r1, [sp, #(8 + S_SP)]
++ sub r1, r1, #8
++ ldr r1, [r1]
++
++ @ write r1 to current_thread_info()->cpu_domain
++ mov r0, sp
++ @ assume 8K pages, since we have to split the immediate in two
++ bic r0, r0, #(0x1fc0)
++ bic r0, r0, #(0x3f)
++ str r1, [r0, #TI_CPU_DOMAIN]
++ @ write r1 to DACR
++ mcr p15, 0, r1, c3, c0, 0
++ @ instruction sync
++ instr_sync
++ @ restore regs
++ ldmia sp!, {r0, r1}
++#endif
++ .endm
++
+ #ifndef CONFIG_THUMB2_KERNEL
+ .macro svc_exit, rpsr, irq = 0
+ .if \irq != 0
+@@ -215,6 +269,9 @@
+ blne trace_hardirqs_off
+ #endif
+ .endif
++
++ pax_exit_kernel
++
+ msr spsr_cxsf, \rpsr
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+@@ -291,6 +348,9 @@
+ blne trace_hardirqs_off
+ #endif
+ .endif
++
++ pax_exit_kernel
++
+ ldr lr, [sp, #S_SP] @ top of the stack
+ ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
+
+diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
+index 059c3da..8e45cfc 100644
+--- a/arch/arm/kernel/fiq.c
++++ b/arch/arm/kernel/fiq.c
+@@ -95,7 +95,10 @@ void set_fiq_handler(void *start, unsigned int length)
+ void *base = vectors_page;
+ unsigned offset = FIQ_OFFSET;
+
++ pax_open_kernel();
+ memcpy(base + offset, start, length);
++ pax_close_kernel();
++
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset, offset +
+ length);
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 29e2991..7bc5757 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -467,7 +467,7 @@ __enable_mmu:
+ mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
++ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
+ mcr p15, 0, r5, c3, c0, 0 @ load domain access register
+ mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
+ #endif
+diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
+index 097e2e2..3927085 100644
+--- a/arch/arm/kernel/module-plts.c
++++ b/arch/arm/kernel/module-plts.c
+@@ -30,17 +30,12 @@ struct plt_entries {
+ u32 lit[PLT_ENT_COUNT];
+ };
+
+-static bool in_init(const struct module *mod, u32 addr)
+-{
+- return addr - (u32)mod->module_init < mod->init_size;
+-}
+-
+ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
+ {
+ struct plt_entries *plt, *plt_end;
+ int c, *count;
+
+- if (in_init(mod, loc)) {
++ if (within_module_init(loc, mod)) {
+ plt = (void *)mod->arch.init_plt->sh_addr;
+ plt_end = (void *)plt + mod->arch.init_plt->sh_size;
+ count = &mod->arch.init_plt_count;
+diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
+index efdddcb..35e58f6 100644
+--- a/arch/arm/kernel/module.c
++++ b/arch/arm/kernel/module.c
+@@ -38,17 +38,47 @@
+ #endif
+
+ #ifdef CONFIG_MMU
+-void *module_alloc(unsigned long size)
++static inline void *__module_alloc(unsigned long size, pgprot_t prot)
+ {
+- void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ void *p;
++
++ if (!size || (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) && PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR))
++ return NULL;
++
++ p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+ return p;
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
++ GFP_KERNEL, prot, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+ }
++
++void *module_alloc(unsigned long size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ return __module_alloc(size, PAGE_KERNEL);
++#else
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++#endif
++
++}
++
++#ifdef CONFIG_PAX_KERNEXEC
++void module_memfree_exec(void *module_region)
++{
++ module_memfree(module_region);
++}
++EXPORT_SYMBOL(module_memfree_exec);
++
++void *module_alloc_exec(unsigned long size)
++{
++ return __module_alloc(size, PAGE_KERNEL_EXEC);
++}
++EXPORT_SYMBOL(module_alloc_exec);
++#endif
+ #endif
+
+ int
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 69bda1a..755113a 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -66,6 +66,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ else
+ __acquire(&patch_lock);
+
++ pax_open_kernel();
+ if (thumb2 && __opcode_is_thumb16(insn)) {
+ *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
+ size = sizeof(u16);
+@@ -97,6 +98,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
+ *(u32 *)waddr = insn;
+ size = sizeof(u32);
+ }
++ pax_close_kernel();
+
+ if (waddr != addr) {
+ flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index f192a2a..1a40523 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -105,8 +105,8 @@ void __show_regs(struct pt_regs *regs)
+
+ show_regs_print_info(KERN_DEFAULT);
+
+- print_symbol("PC is at %s\n", instruction_pointer(regs));
+- print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("PC is at %pA\n", (void *)instruction_pointer(regs));
++ printk("LR is at %pA\n", (void *)regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+@@ -283,12 +283,6 @@ unsigned long get_wchan(struct task_struct *p)
+ return 0;
+ }
+
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long range_end = mm->brk + 0x02000000;
+- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+-}
+-
+ #ifdef CONFIG_MMU
+ #ifdef CONFIG_KUSER_HELPERS
+ /*
+@@ -304,7 +298,7 @@ static struct vm_area_struct gate_vma = {
+
+ static int __init gate_vma_init(void)
+ {
+- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+ return 0;
+ }
+ arch_initcall(gate_vma_init);
+@@ -333,91 +327,13 @@ const char *arch_vma_name(struct vm_area_struct *vma)
+ return is_gate_vma(vma) ? "[vectors]" : NULL;
+ }
+
+-/* If possible, provide a placement hint at a random offset from the
+- * stack for the sigpage and vdso pages.
+- */
+-static unsigned long sigpage_addr(const struct mm_struct *mm,
+- unsigned int npages)
+-{
+- unsigned long offset;
+- unsigned long first;
+- unsigned long last;
+- unsigned long addr;
+- unsigned int slots;
+-
+- first = PAGE_ALIGN(mm->start_stack);
+-
+- last = TASK_SIZE - (npages << PAGE_SHIFT);
+-
+- /* No room after stack? */
+- if (first > last)
+- return 0;
+-
+- /* Just enough room? */
+- if (first == last)
+- return first;
+-
+- slots = ((last - first) >> PAGE_SHIFT) + 1;
+-
+- offset = get_random_int() % slots;
+-
+- addr = first + (offset << PAGE_SHIFT);
+-
+- return addr;
+-}
+-
+-static struct page *signal_page;
+-extern struct page *get_signal_page(void);
+-
+-static const struct vm_special_mapping sigpage_mapping = {
+- .name = "[sigpage]",
+- .pages = &signal_page,
+-};
+-
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+ struct mm_struct *mm = current->mm;
+- struct vm_area_struct *vma;
+- unsigned long npages;
+- unsigned long addr;
+- unsigned long hint;
+- int ret = 0;
+-
+- if (!signal_page)
+- signal_page = get_signal_page();
+- if (!signal_page)
+- return -ENOMEM;
+-
+- npages = 1; /* for sigpage */
+- npages += vdso_total_pages;
+
+ down_write(&mm->mmap_sem);
+- hint = sigpage_addr(mm, npages);
+- addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
+- if (IS_ERR_VALUE(addr)) {
+- ret = addr;
+- goto up_fail;
+- }
+-
+- vma = _install_special_mapping(mm, addr, PAGE_SIZE,
+- VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+- &sigpage_mapping);
+-
+- if (IS_ERR(vma)) {
+- ret = PTR_ERR(vma);
+- goto up_fail;
+- }
+-
+- mm->context.sigpage = addr;
+-
+- /* Unlike the sigpage, failure to install the vdso is unlikely
+- * to be fatal to the process, so no error check needed
+- * here.
+- */
+- arm_install_vdso(mm, addr + PAGE_SIZE);
+-
+- up_fail:
++ mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
+ up_write(&mm->mmap_sem);
+- return ret;
++ return 0;
+ }
+ #endif
+diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
+index f90fdf4..24e8c84 100644
+--- a/arch/arm/kernel/psci.c
++++ b/arch/arm/kernel/psci.c
+@@ -26,7 +26,7 @@
+ #include <asm/psci.h>
+ #include <asm/system_misc.h>
+
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops __read_only;
+
+ static int (*invoke_psci_fn)(u32, u32, u32, u32);
+ typedef int (*psci_initcall_t)(const struct device_node *);
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ef9119f..31995a3 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -928,10 +928,19 @@ static void tracehook_report_syscall(struct pt_regs *regs,
+ regs->ARM_ip = ip;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
+ {
+ current_thread_info()->syscall = scno;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ /* Do the secure computing check first; failures should be fast. */
+ #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
+ if (secure_computing() == -1)
+diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
+index 3826935..8ed63ed 100644
+--- a/arch/arm/kernel/reboot.c
++++ b/arch/arm/kernel/reboot.c
+@@ -122,6 +122,7 @@ void machine_power_off(void)
+
+ if (pm_power_off)
+ pm_power_off();
++ while (1);
+ }
+
+ /*
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index 36c18b7..0d78292 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -108,21 +108,23 @@ EXPORT_SYMBOL(elf_hwcap);
+ unsigned int elf_hwcap2 __read_mostly;
+ EXPORT_SYMBOL(elf_hwcap2);
+
++pteval_t __supported_pte_mask __read_only;
++pmdval_t __supported_pmd_mask __read_only;
+
+ #ifdef MULTI_CPU
+-struct processor processor __read_mostly;
++struct processor processor __read_only;
+ #endif
+ #ifdef MULTI_TLB
+-struct cpu_tlb_fns cpu_tlb __read_mostly;
++struct cpu_tlb_fns cpu_tlb __read_only;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user __read_mostly;
++struct cpu_user_fns cpu_user __read_only;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache __read_mostly;
++struct cpu_cache_fns cpu_cache __read_only;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache __read_mostly;
++struct outer_cache_fns outer_cache __read_only;
+ EXPORT_SYMBOL(outer_cache);
+ #endif
+
+@@ -253,9 +255,13 @@ static int __get_cpu_architecture(void)
+ * Register 0 and check for VMSAv7 or PMSAv7 */
+ unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
+ if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+- (mmfr0 & 0x000000f0) >= 0x00000030)
++ (mmfr0 & 0x000000f0) >= 0x00000030) {
+ cpu_arch = CPU_ARCH_ARMv7;
+- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
++ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
++ __supported_pte_mask |= L_PTE_PXN;
++ __supported_pmd_mask |= PMD_PXNTABLE;
++ }
++ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
+ (mmfr0 & 0x000000f0) == 0x00000020)
+ cpu_arch = CPU_ARCH_ARMv6;
+ else
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 586eef2..61aabd4 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -24,8 +24,6 @@
+
+ extern const unsigned long sigreturn_codes[7];
+
+-static unsigned long signal_return_offset;
+-
+ #ifdef CONFIG_CRUNCH
+ static int preserve_crunch_context(struct crunch_sigframe __user *frame)
+ {
+@@ -390,8 +388,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ * except when the MPU has protected the vectors
+ * page from PL0
+ */
+- retcode = mm->context.sigpage + signal_return_offset +
+- (idx << 2) + thumb;
++ retcode = mm->context.sigpage + (idx << 2) + thumb;
+ } else
+ #endif
+ {
+@@ -597,33 +594,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ } while (thread_flags & _TIF_WORK_MASK);
+ return 0;
+ }
+-
+-struct page *get_signal_page(void)
+-{
+- unsigned long ptr;
+- unsigned offset;
+- struct page *page;
+- void *addr;
+-
+- page = alloc_pages(GFP_KERNEL, 0);
+-
+- if (!page)
+- return NULL;
+-
+- addr = page_address(page);
+-
+- /* Give the signal return code some randomness */
+- offset = 0x200 + (get_random_int() & 0x7fc);
+- signal_return_offset = offset;
+-
+- /*
+- * Copy signal return handlers into the vector page, and
+- * set sigreturn to be a pointer to these.
+- */
+- memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+-
+- ptr = (unsigned long)addr + offset;
+- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+-
+- return page;
+-}
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 3d6b782..8b3baeb 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -76,7 +76,7 @@ enum ipi_msg_type {
+
+ static DECLARE_COMPLETION(cpu_running);
+
+-static struct smp_operations smp_ops;
++static struct smp_operations smp_ops __read_only;
+
+ void __init smp_set_ops(struct smp_operations *ops)
+ {
+diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
+index b10e136..cb5edf9 100644
+--- a/arch/arm/kernel/tcm.c
++++ b/arch/arm/kernel/tcm.c
+@@ -64,7 +64,7 @@ static struct map_desc itcm_iomap[] __initdata = {
+ .virtual = ITCM_OFFSET,
+ .pfn = __phys_to_pfn(ITCM_OFFSET),
+ .length = 0,
+- .type = MT_MEMORY_RWX_ITCM,
++ .type = MT_MEMORY_RX_ITCM,
+ }
+ };
+
+@@ -362,7 +362,9 @@ no_dtcm:
+ start = &__sitcm_text;
+ end = &__eitcm_text;
+ ram = &__itcm_start;
++ pax_open_kernel();
+ memcpy(start, ram, itcm_code_sz);
++ pax_close_kernel();
+ pr_debug("CPU ITCM: copied code from %p - %p\n",
+ start, end);
+ itcm_present = true;
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index d358226..bfd4019 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -65,7 +65,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+- printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -267,6 +267,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ static int die_owner = -1;
+ static unsigned int die_nest_count;
+
++extern void gr_handle_kernel_exploit(void);
++
+ static unsigned long oops_begin(void)
+ {
+ int cpu;
+@@ -309,6 +311,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ if (signr)
+ do_exit(signr);
+ }
+@@ -870,7 +875,11 @@ void __init early_trap_init(void *vectors_base)
+ kuser_init(vectors_base);
+
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
+- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
++
++#ifndef CONFIG_PAX_MEMORY_UDEREF
++ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
++#endif
++
+ #else /* ifndef CONFIG_CPU_V7M */
+ /*
+ * on V7-M there is no need to copy the vector table to a dedicated
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index 8b60fde..8d986dd 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -37,7 +37,7 @@
+ #endif
+
+ #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+- defined(CONFIG_GENERIC_BUG)
++ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
+ #define ARM_EXIT_KEEP(x) x
+ #define ARM_EXIT_DISCARD(x)
+ #else
+@@ -120,6 +120,8 @@ SECTIONS
+ #ifdef CONFIG_DEBUG_RODATA
+ . = ALIGN(1<<SECTION_SHIFT);
+ #endif
++ _etext = .; /* End of text section */
++
+ RO_DATA(PAGE_SIZE)
+
+ . = ALIGN(4);
+@@ -150,8 +152,6 @@ SECTIONS
+
+ NOTES
+
+- _etext = .; /* End of text and rodata section */
+-
+ #ifndef CONFIG_XIP_KERNEL
+ # ifdef CONFIG_ARM_KERNMEM_PERMS
+ . = ALIGN(1<<SECTION_SHIFT);
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index f9c341c..7430436 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -57,7 +57,7 @@ static unsigned long hyp_default_vectors;
+ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
+ /* The VMID used in the VTTBR */
+-static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
++static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u8 kvm_next_vmid;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+
+@@ -372,7 +372,7 @@ void force_vm_exit(const cpumask_t *mask)
+ */
+ static bool need_new_vmid_gen(struct kvm *kvm)
+ {
+- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
++ return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
+ }
+
+ /**
+@@ -405,7 +405,7 @@ static void update_vttbr(struct kvm *kvm)
+
+ /* First user of a new VMID generation? */
+ if (unlikely(kvm_next_vmid == 0)) {
+- atomic64_inc(&kvm_vmid_gen);
++ atomic64_inc_unchecked(&kvm_vmid_gen);
+ kvm_next_vmid = 1;
+
+ /*
+@@ -422,7 +422,7 @@ static void update_vttbr(struct kvm *kvm)
+ kvm_call_hyp(__kvm_flush_vm_context);
+ }
+
+- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
++ kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
+ kvm->arch.vmid = kvm_next_vmid;
+ kvm_next_vmid++;
+
+@@ -1110,7 +1110,7 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+ /**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ int err;
+ int ret, cpu;
+diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
+index 1710fd7..ec3e014 100644
+--- a/arch/arm/lib/clear_user.S
++++ b/arch/arm/lib/clear_user.S
+@@ -12,14 +12,14 @@
+
+ .text
+
+-/* Prototype: int __clear_user(void *addr, size_t sz)
++/* Prototype: int ___clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+ ENTRY(__clear_user_std)
+-WEAK(__clear_user)
++WEAK(___clear_user)
+ stmfd sp!, {r1, lr}
+ mov r2, #0
+ cmp r1, #4
+@@ -44,7 +44,7 @@ WEAK(__clear_user)
+ USER( strnebt r2, [r0])
+ mov r0, #0
+ ldmfd sp!, {r1, pc}
+-ENDPROC(__clear_user)
++ENDPROC(___clear_user)
+ ENDPROC(__clear_user_std)
+
+ .pushsection .text.fixup,"ax"
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index 7a235b9..73a0556 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -17,7 +17,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -89,11 +89,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index 9648b06..19c333c 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -17,7 +17,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -93,11 +93,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+ ENDPROC(__copy_to_user_std)
+
+ .pushsection .text.fixup,"ax"
+diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
+index 1d0957e..f708846 100644
+--- a/arch/arm/lib/csumpartialcopyuser.S
++++ b/arch/arm/lib/csumpartialcopyuser.S
+@@ -57,8 +57,8 @@
+ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ */
+
+-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
+-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
++#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
++#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
+
+ #include "csumpartialcopygeneric.S"
+
+diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
+index 8044591..c9b2609 100644
+--- a/arch/arm/lib/delay.c
++++ b/arch/arm/lib/delay.c
+@@ -29,7 +29,7 @@
+ /*
+ * Default to the loop-based delay implementation.
+ */
+-struct arm_delay_ops arm_delay_ops = {
++struct arm_delay_ops arm_delay_ops __read_only = {
+ .delay = __loop_delay,
+ .const_udelay = __loop_const_udelay,
+ .udelay = __loop_udelay,
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 4b39af2..9ae747d 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -85,7 +85,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
+ return 1;
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(3)
+ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
+ {
+ int atomic;
+@@ -136,7 +136,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+@@ -150,7 +150,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ return __copy_to_user_memcpy(to, from, n);
+ }
+
+-static unsigned long noinline
++static unsigned long noinline __size_overflow(2)
+ __clear_user_memset(void __user *addr, unsigned long n)
+ {
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+@@ -190,7 +190,7 @@ out:
+ return n;
+ }
+
+-unsigned long __clear_user(void __user *addr, unsigned long n)
++unsigned long ___clear_user(void __user *addr, unsigned long n)
+ {
+ /* See rational for this in __copy_to_user() above. */
+ if (n < 64)
+diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
+index f572219..2cf36d5 100644
+--- a/arch/arm/mach-exynos/suspend.c
++++ b/arch/arm/mach-exynos/suspend.c
+@@ -732,8 +732,10 @@ void __init exynos_pm_init(void)
+ tmp |= pm_data->wake_disable_mask;
+ pmu_raw_writel(tmp, S5P_WAKEUP_MASK);
+
+- exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
+- exynos_pm_syscore_ops.resume = pm_data->pm_resume;
++ pax_open_kernel();
++ *(void **)&exynos_pm_syscore_ops.suspend = pm_data->pm_suspend;
++ *(void **)&exynos_pm_syscore_ops.resume = pm_data->pm_resume;
++ pax_close_kernel();
+
+ register_syscore_ops(&exynos_pm_syscore_ops);
+ suspend_set_ops(&exynos_suspend_ops);
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index e46e9ea..9141c83 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -117,7 +117,7 @@ static void __init armada_370_coherency_init(struct device_node *np)
+
+ /*
+ * This ioremap hook is used on Armada 375/38x to ensure that PCIe
+- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
++ * memory areas are mapped as MT_UNCACHED_RW instead of MT_DEVICE. This
+ * is needed as a workaround for a deadlock issue between the PCIe
+ * interface and the cache controller.
+ */
+@@ -130,7 +130,7 @@ armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
+
+ if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
+- mtype = MT_UNCACHED;
++ mtype = MT_UNCACHED_RW;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
+index b6443a4..20a0b74 100644
+--- a/arch/arm/mach-omap2/board-n8x0.c
++++ b/arch/arm/mach-omap2/board-n8x0.c
+@@ -569,7 +569,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
+ }
+ #endif
+
+-struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
++struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
+ .late_init = n8x0_menelaus_late_init,
+ };
+
+diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+index 79f49d9..70bf184 100644
+--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
++++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+@@ -86,7 +86,7 @@ struct cpu_pm_ops {
+ void (*resume)(void);
+ void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
+ void (*hotplug_restart)(void);
+-};
++} __no_const;
+
+ static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
+ static struct powerdomain *mpuss_pd;
+@@ -105,7 +105,7 @@ static void dummy_cpu_resume(void)
+ static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
+ {}
+
+-struct cpu_pm_ops omap_pm_ops = {
++static struct cpu_pm_ops omap_pm_ops __read_only = {
+ .finish_suspend = default_finish_suspend,
+ .resume = dummy_cpu_resume,
+ .scu_prepare = dummy_scu_prepare,
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 5305ec7..6d74045 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -19,6 +19,7 @@
+ #include <linux/device.h>
+ #include <linux/smp.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+
+ #include <asm/smp_scu.h>
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+index e1d2e99..d9b3177 100644
+--- a/arch/arm/mach-omap2/omap-wakeupgen.c
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -330,7 +330,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata irq_hotplug_notifier = {
++static struct notifier_block irq_hotplug_notifier = {
+ .notifier_call = irq_cpu_hotplug_notify,
+ };
+
+diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
+index 4cb8fd9..5ce65bc 100644
+--- a/arch/arm/mach-omap2/omap_device.c
++++ b/arch/arm/mach-omap2/omap_device.c
+@@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
+ struct platform_device __init *omap_device_build(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod *oh,
+- void *pdata, int pdata_len)
++ const void *pdata, int pdata_len)
+ {
+ struct omap_hwmod *ohs[] = { oh };
+
+@@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
+ struct platform_device __init *omap_device_build_ss(const char *pdev_name,
+ int pdev_id,
+ struct omap_hwmod **ohs,
+- int oh_cnt, void *pdata,
++ int oh_cnt, const void *pdata,
+ int pdata_len)
+ {
+ int ret = -ENOMEM;
+diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
+index 78c02b3..c94109a 100644
+--- a/arch/arm/mach-omap2/omap_device.h
++++ b/arch/arm/mach-omap2/omap_device.h
+@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
+ /* Core code interface */
+
+ struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
+- struct omap_hwmod *oh, void *pdata,
++ struct omap_hwmod *oh, const void *pdata,
+ int pdata_len);
+
+ struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
+ struct omap_hwmod **oh, int oh_cnt,
+- void *pdata, int pdata_len);
++ const void *pdata, int pdata_len);
+
+ struct omap_device *omap_device_alloc(struct platform_device *pdev,
+ struct omap_hwmod **ohs, int oh_cnt);
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 486cc4d..8d1a0b7 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -199,10 +199,10 @@ struct omap_hwmod_soc_ops {
+ int (*init_clkdm)(struct omap_hwmod *oh);
+ void (*update_context_lost)(struct omap_hwmod *oh);
+ int (*get_context_lost)(struct omap_hwmod *oh);
+-};
++} __no_const;
+
+ /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+-static struct omap_hwmod_soc_ops soc_ops;
++static struct omap_hwmod_soc_ops soc_ops __read_only;
+
+ /* omap_hwmod_list contains all registered struct omap_hwmods */
+ static LIST_HEAD(omap_hwmod_list);
+diff --git a/arch/arm/mach-omap2/powerdomains43xx_data.c b/arch/arm/mach-omap2/powerdomains43xx_data.c
+index 95fee54..cfa9cf1 100644
+--- a/arch/arm/mach-omap2/powerdomains43xx_data.c
++++ b/arch/arm/mach-omap2/powerdomains43xx_data.c
+@@ -10,6 +10,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <asm/pgtable.h>
+
+ #include "powerdomain.h"
+
+@@ -129,7 +130,9 @@ static int am43xx_check_vcvp(void)
+
+ void __init am43xx_powerdomains_init(void)
+ {
+- omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
++ pax_open_kernel();
++ *(void **)&omap4_pwrdm_operations.pwrdm_has_voltdm = am43xx_check_vcvp;
++ pax_close_kernel();
+ pwrdm_register_platform_funcs(&omap4_pwrdm_operations);
+ pwrdm_register_pwrdms(powerdomains_am43xx);
+ pwrdm_complete_init();
+diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
+index ff0a68c..b312aa0 100644
+--- a/arch/arm/mach-omap2/wd_timer.c
++++ b/arch/arm/mach-omap2/wd_timer.c
+@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
+ struct omap_hwmod *oh;
+ char *oh_name = "wd_timer2";
+ char *dev_name = "omap_wdt";
+- struct omap_wd_timer_platform_data pdata;
++ static struct omap_wd_timer_platform_data pdata = {
++ .read_reset_sources = prm_read_reset_sources
++ };
+
+ if (!cpu_class_is_omap2() || of_have_populated_dt())
+ return 0;
+@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
+ return -EINVAL;
+ }
+
+- pdata.read_reset_sources = prm_read_reset_sources;
+-
+ pdev = omap_device_build(dev_name, id, oh, &pdata,
+ sizeof(struct omap_wd_timer_platform_data));
+ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
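The wd_timer change above replaces an on-stack platform-data struct that was filled in at run time with a function-local static initialized at compile time, which pairs naturally with the const void *pdata signature introduced in the omap_device hunks earlier in this section. A rough illustration of the two styles, with stand-in names only (the real counterparts are omap_wd_timer_platform_data and omap_device_build()):

    #include <linux/init.h>
    #include <linux/types.h>

    struct example_pdata {
            u32 (*read_reset_sources)(void);
    };

    extern u32 example_read_reset_sources(void);
    extern int example_device_build(const char *name, int id,
                                    const void *pdata, int pdata_len);

    static int __init example_init_old(void)
    {
            struct example_pdata pdata;             /* writable stack copy, set at run time */

            pdata.read_reset_sources = example_read_reset_sources;
            return example_device_build("example_wdt", -1, &pdata, sizeof(pdata));
    }

    static int __init example_init_new(void)
    {
            static struct example_pdata pdata = {   /* initialized at compile time */
                    .read_reset_sources = example_read_reset_sources,
            };

            return example_device_build("example_wdt", -1, &pdata, sizeof(pdata));
    }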
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index b0790fc..71eb21f 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -22,6 +22,7 @@
+ #include <asm/proc-fns.h>
+ #include <asm/smp_plat.h>
+ #include <asm/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+ #include "platsmp-apmu.h"
+
+@@ -233,6 +234,8 @@ static int shmobile_smp_apmu_enter_suspend(suspend_state_t state)
+
+ void __init shmobile_smp_apmu_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = shmobile_smp_apmu_enter_suspend;
++ pax_close_kernel();
+ }
+ #endif
+diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
+index 34608fc..344d7c0 100644
+--- a/arch/arm/mach-shmobile/pm-r8a7740.c
++++ b/arch/arm/mach-shmobile/pm-r8a7740.c
+@@ -11,6 +11,7 @@
+ #include <linux/console.h>
+ #include <linux/io.h>
+ #include <linux/suspend.h>
++#include <asm/pgtable.h>
+
+ #include "common.h"
+ #include "pm-rmobile.h"
+@@ -117,7 +118,9 @@ static int r8a7740_enter_suspend(suspend_state_t suspend_state)
+
+ static void r8a7740_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = r8a7740_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = r8a7740_enter_suspend;
++ pax_close_kernel();
+ }
+ #else
+ static void r8a7740_suspend_init(void) {}
+diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
+index a7e4668..83334f33 100644
+--- a/arch/arm/mach-shmobile/pm-sh73a0.c
++++ b/arch/arm/mach-shmobile/pm-sh73a0.c
+@@ -9,6 +9,7 @@
+ */
+
+ #include <linux/suspend.h>
++#include <asm/pgtable.h>
+ #include "common.h"
+
+ #ifdef CONFIG_SUSPEND
+@@ -20,7 +21,9 @@ static int sh73a0_enter_suspend(suspend_state_t suspend_state)
+
+ static void sh73a0_suspend_init(void)
+ {
+- shmobile_suspend_ops.enter = sh73a0_enter_suspend;
++ pax_open_kernel();
++ *(void **)&shmobile_suspend_ops.enter = sh73a0_enter_suspend;
++ pax_close_kernel();
+ }
+ #else
+ static void sh73a0_suspend_init(void) {}
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index 7469347..1ecc350 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -177,7 +177,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
+ bool entered_lp2 = false;
+
+ if (tegra_pending_sgi())
+- ACCESS_ONCE(abort_flag) = true;
++ ACCESS_ONCE_RW(abort_flag) = true;
+
+ cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+
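ACCESS_ONCE_RW, used in the tegra hunk above and in the arm64 barrier/percpu hunks below, comes from the compiler.h portion of this patch, where ACCESS_ONCE is turned into a read-only accessor; intentional single-copy stores are therefore rewritten to the _RW form. A small hedged sketch of the split, with a hypothetical flag:

    #include <linux/compiler.h>     /* ACCESS_ONCE; the _RW variant is added by this patch */

    static bool abort_flag;                         /* hypothetical shared flag */

    static int example_poll_abort(void)
    {
            if (ACCESS_ONCE(abort_flag))            /* reads keep using ACCESS_ONCE */
                    return -EINTR;
            return 0;
    }

    static void example_request_abort(void)
    {
            ACCESS_ONCE_RW(abort_flag) = true;      /* stores go through the _RW variant */
    }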
+diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
+index 3b9098d..15b390f 100644
+--- a/arch/arm/mach-tegra/irq.c
++++ b/arch/arm/mach-tegra/irq.c
+@@ -20,6 +20,7 @@
+ #include <linux/cpu_pm.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/irq.h>
+ #include <linux/kernel.h>
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 8538910..2f39bc4 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -10,6 +10,7 @@
+ */
+
+ #include <linux/kernel.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index f66816c..228b951 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -24,6 +24,7 @@
+ #include <linux/io.h>
+ #include <asm/cacheflush.h>
+ #include <asm/smp_scu.h>
++#include <linux/irq.h>
+ #include <linux/irqchip/arm-gic.h>
+ #include "common.h"
+
+diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
+index 7c6b976..055db09 100644
+--- a/arch/arm/mm/Kconfig
++++ b/arch/arm/mm/Kconfig
+@@ -446,6 +446,7 @@ config CPU_32v5
+
+ config CPU_32v6
+ bool
++ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
+
+ config CPU_32v6K
+@@ -600,6 +601,7 @@ config CPU_CP15_MPU
+
+ config CPU_USE_DOMAINS
+ bool
++ depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+@@ -818,7 +820,7 @@ config NEED_KUSER_HELPERS
+
+ config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+- depends on MMU
++ depends on MMU && (!(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND)
+ default y
+ help
+ Warning: disabling this option may break user programs.
+@@ -832,7 +834,7 @@ config KUSER_HELPERS
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+- by ROP (return orientated programming) authors when creating
++ by ROP (Return Oriented Programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
+index 9769f1e..16aaa55 100644
+--- a/arch/arm/mm/alignment.c
++++ b/arch/arm/mm/alignment.c
+@@ -216,10 +216,12 @@ union offset_union {
+ #define __get16_unaligned_check(ins,val,addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
++ pax_open_userland(); \
+ __get8_unaligned_check(ins,v,a,err); \
+ val = v << ((BE) ? 8 : 0); \
+ __get8_unaligned_check(ins,v,a,err); \
+ val |= v << ((BE) ? 0 : 8); \
++ pax_close_userland(); \
+ if (err) \
+ goto fault; \
+ } while (0)
+@@ -233,6 +235,7 @@ union offset_union {
+ #define __get32_unaligned_check(ins,val,addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
++ pax_open_userland(); \
+ __get8_unaligned_check(ins,v,a,err); \
+ val = v << ((BE) ? 24 : 0); \
+ __get8_unaligned_check(ins,v,a,err); \
+@@ -241,6 +244,7 @@ union offset_union {
+ val |= v << ((BE) ? 8 : 16); \
+ __get8_unaligned_check(ins,v,a,err); \
+ val |= v << ((BE) ? 0 : 24); \
++ pax_close_userland(); \
+ if (err) \
+ goto fault; \
+ } while (0)
+@@ -254,6 +258,7 @@ union offset_union {
+ #define __put16_unaligned_check(ins,val,addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
++ pax_open_userland(); \
+ __asm__( FIRST_BYTE_16 \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+@@ -273,6 +278,7 @@ union offset_union {
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
++ pax_close_userland(); \
+ if (err) \
+ goto fault; \
+ } while (0)
+@@ -286,6 +292,7 @@ union offset_union {
+ #define __put32_unaligned_check(ins,val,addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
++ pax_open_userland(); \
+ __asm__( FIRST_BYTE_32 \
+ ARM( "1: "ins" %1, [%2], #1\n" ) \
+ THUMB( "1: "ins" %1, [%2]\n" ) \
+@@ -315,6 +322,7 @@ union offset_union {
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
++ pax_close_userland(); \
+ if (err) \
+ goto fault; \
+ } while (0)
+diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
+index 71b3d33..8af9ade 100644
+--- a/arch/arm/mm/cache-l2x0.c
++++ b/arch/arm/mm/cache-l2x0.c
+@@ -44,7 +44,7 @@ struct l2c_init_data {
+ void (*configure)(void __iomem *);
+ void (*unlock)(void __iomem *, unsigned);
+ struct outer_cache_fns outer_cache;
+-};
++} __do_const;
+
+ #define CACHE_LINE_SIZE 32
+
+diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
+index 845769e..4278fd7 100644
+--- a/arch/arm/mm/context.c
++++ b/arch/arm/mm/context.c
+@@ -43,7 +43,7 @@
+ #define NUM_USER_ASIDS ASID_FIRST_VERSION
+
+ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+-static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
++static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
+ static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
+
+ static DEFINE_PER_CPU(atomic64_t, active_asids);
+@@ -178,7 +178,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ {
+ static u32 cur_idx = 1;
+ u64 asid = atomic64_read(&mm->context.id);
+- u64 generation = atomic64_read(&asid_generation);
++ u64 generation = atomic64_read_unchecked(&asid_generation);
+
+ if (asid != 0) {
+ /*
+@@ -208,7 +208,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+ */
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
+ if (asid == NUM_USER_ASIDS) {
+- generation = atomic64_add_return(ASID_FIRST_VERSION,
++ generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
+ &asid_generation);
+ flush_context(cpu);
+ asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
+@@ -240,14 +240,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
+ cpu_set_reserved_ttbr0();
+
+ asid = atomic64_read(&mm->context.id);
+- if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
++ if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
+ goto switch_mm_fastpath;
+
+ raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+ /* Check that our ASID belongs to the current generation. */
+ asid = atomic64_read(&mm->context.id);
+- if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
++ if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
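The asid_generation counter above is switched to atomic64_unchecked_t because, under the patch's PAX_REFCOUNT protection, regular atomic64_t operations trap on overflow; a generation counter is allowed to wrap, so it opts out through the *_unchecked variants (which the arm64, frv and ia64 hunks below simply alias to the normal operations where no protection exists). A brief hedged sketch of when each type is appropriate; the names are illustrative and the _unchecked types come from this patch:

    #include <linux/atomic.h>

    /* reference count: overflow would be a bug, keep the checked type */
    static atomic_t example_refcnt = ATOMIC_INIT(1);

    /* rolling generation number: wrapping is expected, opt out of the overflow check */
    static atomic64_unchecked_t example_generation = ATOMIC64_INIT(0);

    static u64 example_next_generation(void)
    {
            return atomic64_add_return_unchecked(1, &example_generation);
    }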
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 0d629b8..01867c8 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -25,6 +25,7 @@
+ #include <asm/system_misc.h>
+ #include <asm/system_info.h>
+ #include <asm/tlbflush.h>
++#include <asm/sections.h>
+
+ #include "fault.h"
+
+@@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ if (fixup_exception(regs))
+ return;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ }
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((fsr & FSR_WRITE) &&
++ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
++ (MODULES_VADDR <= addr && addr < MODULES_END)))
++ {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
++ }
++#endif
++
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+@@ -173,6 +199,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (fsr & FSR_LNX_PF) {
++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+@@ -400,6 +433,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ }
+ #endif /* CONFIG_MMU */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (__force unsigned char __user *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP-4: ");
++ for (i = -1; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (__force unsigned long __user *)sp+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * First Level Translation Fault Handler
+ *
+@@ -547,9 +607,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
++ goto die;
++ }
++#endif
++
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+ show_pte(current->mm, addr);
+@@ -574,15 +647,104 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
+ ifsr_info[nr].name = name;
+ }
+
++asmlinkage int sys_sigreturn(struct pt_regs *regs);
++asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
++
+ asmlinkage void __exception
+ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ {
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
++ unsigned long pc = instruction_pointer(regs);
++
++ if (user_mode(regs)) {
++ unsigned long sigpage = current->mm->context.sigpage;
++
++ if (sigpage <= pc && pc < sigpage + 7*4) {
++ if (pc < sigpage + 3*4)
++ sys_sigreturn(regs);
++ else
++ sys_rt_sigreturn(regs);
++ return;
++ }
++ if (pc == 0xffff0f60UL) {
++ /*
++ * PaX: __kuser_cmpxchg64 emulation
++ */
++ // TODO
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fa0UL) {
++ /*
++ * PaX: __kuser_memory_barrier emulation
++ */
++ // dmb(); implied by the exception
++ regs->ARM_pc = regs->ARM_lr;
++ return;
++ }
++ if (pc == 0xffff0fc0UL) {
++ /*
++ * PaX: __kuser_cmpxchg emulation
++ */
++ // TODO
++ //long new;
++ //int op;
++
++ //op = FUTEX_OP_SET << 28;
++ //new = futex_atomic_op_inuser(op, regs->ARM_r2);
++ //regs->ARM_r0 = old != new;
++ //regs->ARM_pc = regs->ARM_lr;
++ //return;
++ }
++ if (pc == 0xffff0fe0UL) {
++ /*
++ * PaX: __kuser_get_tls emulation
++ */
++ regs->ARM_r0 = current_thread_info()->tp_value[0];
++ regs->ARM_pc = regs->ARM_lr;
++ return;
++ }
++ }
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
++ if (current->signal->curr_ip)
++ printk(KERN_EMERG "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ else
++ printk(KERN_EMERG "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
++ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
++ pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
++ goto die;
++ }
++#endif
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
++#ifdef CONFIG_THUMB2_KERNEL
++ unsigned short bkpt;
++
++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le16(bkpt) == 0xbef1) {
++#else
++ unsigned int bkpt;
++
++ if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
++#endif
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_overflow(regs);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
++die:
+ pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
+index cf08bdf..772656c 100644
+--- a/arch/arm/mm/fault.h
++++ b/arch/arm/mm/fault.h
+@@ -3,6 +3,7 @@
+
+ /*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
++ * Set when the FSR value is from an instruction fault.
+ */
+ #define FSR_LNX_PF (1 << 31)
+ #define FSR_WRITE (1 << 11)
+@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
+ }
+ #endif
+
++/* valid for LPAE and !LPAE */
++static inline int is_xn_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0x3c) == 0xc);
++}
++
++static inline int is_domain_fault(unsigned int fsr)
++{
++ return ((fsr_fs(fsr) & 0xD) == 0x9);
++}
++
+ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ unsigned long search_exception_table(unsigned long addr);
+
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 8a63b4c..6b04370 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -710,7 +710,46 @@ void free_tcmmem(void)
+ {
+ #ifdef CONFIG_HAVE_TCM
+ extern char __tcm_start, __tcm_end;
++#endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long addr;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int cpu_arch = cpu_architecture();
++ unsigned int cr = get_cr();
++
++ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
++ /* make page tables, etc. before .text NX */
++ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make init NX */
++ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ __section_update(pmd, addr, PMD_SECT_XN);
++ }
++ /* make kernel code/rodata RX */
++ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++#ifdef CONFIG_ARM_LPAE
++ __section_update(pmd, addr, PMD_SECT_RDONLY);
++#else
++ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
++#endif
++ }
++ }
++#endif
++
++#ifdef CONFIG_HAVE_TCM
+ poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
+ free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
+ #endif
+diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
+index 0c81056..97279f7 100644
+--- a/arch/arm/mm/ioremap.c
++++ b/arch/arm/mm/ioremap.c
+@@ -405,9 +405,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
+ unsigned int mtype;
+
+ if (cached)
+- mtype = MT_MEMORY_RWX;
++ mtype = MT_MEMORY_RX;
+ else
+- mtype = MT_MEMORY_RWX_NONCACHED;
++ mtype = MT_MEMORY_RX_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index 407dc78..047ce9d 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long addr = addr0;
+ int do_align = 0;
+ int aliasing = cache_is_vipt_aliasing();
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /*
+@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_align)
+@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ else
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -183,14 +193,30 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
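The mmap.c changes follow the same shape as the other arch_get_unmapped_area hooks touched by this patch: a per-thread randomisation offset is fetched up front, the caller-supplied address hint is ignored when PAX_RANDMMAP is active, and the hint is validated with check_heap_stack_gap() rather than the bare vma->vm_start comparison so the grsecurity gap/offset policy is applied. Condensed, the recurring skeleton looks roughly like this; the gr_/pax_ helpers, pax_flags and threadstack_offset field all come from other parts of this patch:

    unsigned long
    example_get_unmapped_area(struct file *filp, unsigned long addr,
                              unsigned long len, unsigned long pgoff,
                              unsigned long flags)
    {
            struct mm_struct *mm = current->mm;
            struct vm_area_struct *vma;
            struct vm_unmapped_area_info info;
            unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);

            if (len > TASK_SIZE)
                    return -ENOMEM;

    #ifdef CONFIG_PAX_RANDMMAP
            if (!(mm->pax_flags & MF_PAX_RANDMMAP))
    #endif
            if (addr) {
                    addr = PAGE_ALIGN(addr);
                    vma = find_vma(mm, addr);
                    if (TASK_SIZE - len >= addr &&
                        check_heap_stack_gap(vma, addr, len, offset))
                            return addr;
            }

            info.flags = 0;
            info.length = len;
            info.low_limit = mm->mmap_base;
            info.high_limit = TASK_SIZE;
            info.align_mask = 0;
            info.align_offset = pgoff << PAGE_SHIFT;
            info.threadstack_offset = offset;
            return vm_unmapped_area(&info);
    }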
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 870838a..070df1d 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -41,6 +41,22 @@
+ #include "mm.h"
+ #include "tcm.h"
+
++#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++void modify_domain(unsigned int dom, unsigned int type)
++{
++ struct thread_info *thread = current_thread_info();
++ unsigned int domain = thread->cpu_domain;
++ /*
++ * DOMAIN_MANAGER might be defined to some other value,
++ * use the arch-defined constant
++ */
++ domain &= ~domain_val(dom, 3);
++ thread->cpu_domain = domain | domain_val(dom, type);
++ set_domain(thread->cpu_domain);
++}
++EXPORT_SYMBOL(modify_domain);
++#endif
++
+ /*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+@@ -242,7 +258,15 @@ __setup("noalign", noalign_setup);
+ #define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
+ #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
+
+-static struct mem_type mem_types[] = {
++#ifdef CONFIG_PAX_KERNEXEC
++#define L_PTE_KERNEXEC L_PTE_RDONLY
++#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
++#else
++#define L_PTE_KERNEXEC L_PTE_DIRTY
++#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
++#endif
++
++static struct mem_type mem_types[] __read_only = {
+ [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
+ .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+ L_PTE_SHARED,
+@@ -271,19 +295,19 @@ static struct mem_type mem_types[] = {
+ .prot_sect = PROT_SECT_DEVICE,
+ .domain = DOMAIN_IO,
+ },
+- [MT_UNCACHED] = {
++ [MT_UNCACHED_RW] = {
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_IO,
+ },
+- [MT_CACHECLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
++ [MT_CACHECLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #ifndef CONFIG_ARM_LPAE
+- [MT_MINICLEAN] = {
+- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
++ [MT_MINICLEAN_RO] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_XN | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+ #endif
+@@ -291,15 +315,15 @@ static struct mem_type mem_types[] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_RDONLY,
+ .prot_l1 = PMD_TYPE_TABLE,
+- .domain = DOMAIN_USER,
++ .domain = DOMAIN_VECTORS,
+ },
+ [MT_HIGH_VECTORS] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_USER | L_PTE_RDONLY,
+ .prot_l1 = PMD_TYPE_TABLE,
+- .domain = DOMAIN_USER,
++ .domain = DOMAIN_VECTORS,
+ },
+- [MT_MEMORY_RWX] = {
++ [__MT_MEMORY_RWX] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+@@ -312,17 +336,30 @@ static struct mem_type mem_types[] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_ROM] = {
+- .prot_sect = PMD_TYPE_SECT,
++ [MT_MEMORY_RX] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
++ [MT_ROM_RX] = {
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_NONCACHED] = {
++ [MT_MEMORY_RW_NONCACHED] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_MT_BUFFERABLE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_KERNEL,
+ },
++ [MT_MEMORY_RX_NONCACHED] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
++ L_PTE_MT_BUFFERABLE,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
++ .domain = DOMAIN_KERNEL,
++ },
+ [MT_MEMORY_RW_DTCM] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+@@ -330,9 +367,10 @@ static struct mem_type mem_types[] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
+- [MT_MEMORY_RWX_ITCM] = {
+- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++ [MT_MEMORY_RX_ITCM] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+ .prot_l1 = PMD_TYPE_TABLE,
++ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+ .domain = DOMAIN_KERNEL,
+ },
+ [MT_MEMORY_RW_SO] = {
+@@ -544,9 +582,14 @@ static void __init build_mem_type_table(void)
+ * Mark cache clean areas and XIP ROM read only
+ * from SVC mode and no access from userspace.
+ */
+- mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_ROM_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#ifdef CONFIG_PAX_KERNEXEC
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_MEMORY_RX_ITCM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++#endif
++ mem_types[MT_MINICLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+ #endif
+
+ /*
+@@ -563,13 +606,17 @@ static void __init build_mem_type_table(void)
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_S;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+ }
+
+@@ -580,15 +627,20 @@ static void __init build_mem_type_table(void)
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /* Non-cacheable Normal is XCB = 001 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_BUFFERED;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_BUFFERED;
+ } else {
+ /* For both ARMv6 and non-TEX-remapping ARMv7 */
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |=
++ PMD_SECT_TEX(1);
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |=
+ PMD_SECT_TEX(1);
+ }
+ } else {
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ #ifdef CONFIG_ARM_LPAE
+@@ -609,6 +661,8 @@ static void __init build_mem_type_table(void)
+ user_pgprot |= PTE_EXT_PXN;
+ #endif
+
++ user_pgprot |= __supported_pte_mask;
++
+ for (i = 0; i < 16; i++) {
+ pteval_t v = pgprot_val(protection_map[i]);
+ protection_map[i] = __pgprot(v | user_pgprot);
+@@ -626,21 +680,24 @@ static void __init build_mem_type_table(void)
+
+ mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+- mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+- mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
++ mem_types[__MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[__MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
++ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
++ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+- mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
+- mem_types[MT_ROM].prot_sect |= cp->pmd;
++ mem_types[MT_MEMORY_RW_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_MEMORY_RX_NONCACHED].prot_sect |= ecc_mask;
++ mem_types[MT_ROM_RX].prot_sect |= cp->pmd;
+
+ switch (cp->pmd) {
+ case PMD_SECT_WT:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WT;
+ break;
+ case PMD_SECT_WB:
+ case PMD_SECT_WBWA:
+- mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
++ mem_types[MT_CACHECLEAN_RO].prot_sect |= PMD_SECT_WB;
+ break;
+ }
+ pr_info("Memory policy: %sData cache %s\n",
+@@ -854,7 +911,7 @@ static void __init create_mapping(struct map_desc *md)
+ return;
+ }
+
+- if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
++ if ((md->type == MT_DEVICE || md->type == MT_ROM_RX) &&
+ md->virtual >= PAGE_OFFSET &&
+ (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
+ pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
+@@ -1224,18 +1281,15 @@ void __init arm_mm_memblock_reserve(void)
+ * called function. This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+ */
++
++static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
++
+ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ {
+ struct map_desc map;
+ unsigned long addr;
+- void *vectors;
+
+- /*
+- * Allocate the vector page early.
+- */
+- vectors = early_alloc(PAGE_SIZE * 2);
+-
+- early_trap_init(vectors);
++ early_trap_init(&vectors);
+
+ for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+@@ -1248,7 +1302,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+ map.virtual = MODULES_VADDR;
+ map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+- map.type = MT_ROM;
++ map.type = MT_ROM_RX;
+ create_mapping(&map);
+ #endif
+
+@@ -1259,14 +1313,14 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+ map.virtual = FLUSH_BASE;
+ map.length = SZ_1M;
+- map.type = MT_CACHECLEAN;
++ map.type = MT_CACHECLEAN_RO;
+ create_mapping(&map);
+ #endif
+ #ifdef FLUSH_BASE_MINICACHE
+ map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+ map.virtual = FLUSH_BASE_MINICACHE;
+ map.length = SZ_1M;
+- map.type = MT_MINICLEAN;
++ map.type = MT_MINICLEAN_RO;
+ create_mapping(&map);
+ #endif
+
+@@ -1275,7 +1329,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
+ */
+- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
++ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
+ map.virtual = 0xffff0000;
+ map.length = PAGE_SIZE;
+ #ifdef CONFIG_KUSER_HELPERS
+@@ -1335,8 +1389,10 @@ static void __init kmap_init(void)
+ static void __init map_lowmem(void)
+ {
+ struct memblock_region *reg;
++#ifndef CONFIG_PAX_KERNEXEC
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
++#endif
+
+ /* Map all the lowmem memory banks. */
+ for_each_memblock(memory, reg) {
+@@ -1349,11 +1405,48 @@ static void __init map_lowmem(void)
+ if (start >= end)
+ break;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ map.pfn = __phys_to_pfn(start);
++ map.virtual = __phys_to_virt(start);
++ map.length = end - start;
++
++ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
++ struct map_desc kernel;
++ struct map_desc initmap;
++
++ /* when freeing initmem we will make this RW */
++ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
++ initmap.virtual = (unsigned long)__init_begin;
++ initmap.length = _sdata - __init_begin;
++ initmap.type = __MT_MEMORY_RWX;
++ create_mapping(&initmap);
++
++ /* when freeing initmem we will make this RX */
++ kernel.pfn = __phys_to_pfn(__pa(_stext));
++ kernel.virtual = (unsigned long)_stext;
++ kernel.length = __init_begin - _stext;
++ kernel.type = __MT_MEMORY_RWX;
++ create_mapping(&kernel);
++
++ if (map.virtual < (unsigned long)_stext) {
++ map.length = (unsigned long)_stext - map.virtual;
++ map.type = __MT_MEMORY_RWX;
++ create_mapping(&map);
++ }
++
++ map.pfn = __phys_to_pfn(__pa(_sdata));
++ map.virtual = (unsigned long)_sdata;
++ map.length = end - __pa(_sdata);
++ }
++
++ map.type = MT_MEMORY_RW;
++ create_mapping(&map);
++#else
+ if (end < kernel_x_start) {
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+ } else if (start >= kernel_x_end) {
+@@ -1377,7 +1470,7 @@ static void __init map_lowmem(void)
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+- map.type = MT_MEMORY_RWX;
++ map.type = __MT_MEMORY_RWX;
+
+ create_mapping(&map);
+
+@@ -1390,6 +1483,7 @@ static void __init map_lowmem(void)
+ create_mapping(&map);
+ }
+ }
++#endif
+ }
+ }
+
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index c011e22..92a0260 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -20,6 +20,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/hwcap.h>
+ #include <asm/opcodes.h>
++#include <asm/pgtable.h>
+
+ #include "bpf_jit_32.h"
+
+@@ -72,54 +73,38 @@ struct jit_ctx {
+ #endif
+ };
+
++#ifdef CONFIG_GRKERNSEC_BPF_HARDEN
++int bpf_jit_enable __read_only;
++#else
+ int bpf_jit_enable __read_mostly;
++#endif
+
+-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+- unsigned int size)
+-{
+- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+-
+- if (!ptr)
+- return -EFAULT;
+- memcpy(ret, ptr, size);
+- return 0;
+-}
+-
+-static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+ {
+ u8 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 1);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 1);
++ err = skb_copy_bits(skb, offset, &ret, 1);
+
+ return (u64)err << 32 | ret;
+ }
+
+-static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+ {
+ u16 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 2);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 2);
++ err = skb_copy_bits(skb, offset, &ret, 2);
+
+ return (u64)err << 32 | ntohs(ret);
+ }
+
+-static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
++static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+ {
+ u32 ret;
+ int err;
+
+- if (offset < 0)
+- err = call_neg_helper(skb, offset, &ret, 4);
+- else
+- err = skb_copy_bits(skb, offset, &ret, 4);
++ err = skb_copy_bits(skb, offset, &ret, 4);
+
+ return (u64)err << 32 | ntohl(ret);
+ }
+@@ -199,8 +184,10 @@ static void jit_fill_hole(void *area, unsigned int size)
+ {
+ u32 *ptr;
+ /* We are guaranteed to have aligned memory. */
++ pax_open_kernel();
+ for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
+ *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
++ pax_close_kernel();
+ }
+
+ static void build_prologue(struct jit_ctx *ctx)
+@@ -556,6 +543,9 @@ static int build_body(struct jit_ctx *ctx)
+ case BPF_LD | BPF_B | BPF_ABS:
+ load_order = 0;
+ load:
++ /* the interpreter will deal with the negative K */
++ if ((int)k < 0)
++ return -ENOTSUPP;
+ emit_mov_i(r_off, k, ctx);
+ load_common:
+ ctx->seen |= SEEN_DATA | SEEN_CALL;
+@@ -570,18 +560,6 @@ load_common:
+ condt = ARM_COND_HI;
+ }
+
+- /*
+- * test for negative offset, only if we are
+- * currently scheduled to take the fast
+- * path. this will update the flags so that
+- * the slowpath instruction are ignored if the
+- * offset is negative.
+- *
+- * for loard_order == 0 the HI condition will
+- * make loads at offset 0 take the slow path too.
+- */
+- _emit(condt, ARM_CMP_I(r_off, 0), ctx);
+-
+ _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
+ ctx);
+
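The bpf_jit_32.c hunk drops the JIT's own handling of negative load offsets: build_body() now returns -ENOTSUPP for a negative immediate, so such programs stay on the BPF interpreter, and the skb helpers take an unsigned offset again. Negative offsets are how classic BPF reaches the ancillary data area; an illustrative userspace filter of that kind (not part of the patch) looks like this:

    #include <linux/filter.h>
    #include <linux/if_ether.h>

    /* load the protocol from the ancillary area (negative K = SKF_AD_OFF + SKF_AD_PROTOCOL),
     * then accept ARP frames and drop everything else */
    struct sock_filter example_prog[] = {
            BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
            BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),
            BPF_STMT(BPF_RET | BPF_K, 0xffffffff),  /* accept: return whole packet */
            BPF_STMT(BPF_RET | BPF_K, 0),           /* drop */
    };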
+diff --git a/arch/arm/plat-iop/setup.c b/arch/arm/plat-iop/setup.c
+index 5b217f4..c23f40e 100644
+--- a/arch/arm/plat-iop/setup.c
++++ b/arch/arm/plat-iop/setup.c
+@@ -24,7 +24,7 @@ static struct map_desc iop3xx_std_desc[] __initdata = {
+ .virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
+ .pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
+ .length = IOP3XX_PERIPHERAL_SIZE,
+- .type = MT_UNCACHED,
++ .type = MT_UNCACHED_RW,
+ },
+ };
+
+diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
+index a5bc92d..0bb4730 100644
+--- a/arch/arm/plat-omap/sram.c
++++ b/arch/arm/plat-omap/sram.c
+@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
+ * Looks like we need to preserve some bootloader code at the
+ * beginning of SRAM for jumping to flash for reboot to work...
+ */
++ pax_open_kernel();
+ memset_io(omap_sram_base + omap_sram_skip, 0,
+ omap_sram_size - omap_sram_skip);
++ pax_close_kernel();
+ }
+diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
+index d6285ef..b684dac 100644
+--- a/arch/arm64/Kconfig.debug
++++ b/arch/arm64/Kconfig.debug
+@@ -10,6 +10,7 @@ config ARM64_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
+index 7047051..44e8675 100644
+--- a/arch/arm64/include/asm/atomic.h
++++ b/arch/arm64/include/asm/atomic.h
+@@ -252,5 +252,15 @@ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif
+ #endif
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index 0fa47c4..b167938 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -44,7 +44,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 4fde8c1..441f84f 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -135,16 +135,16 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
+ {
+ switch (size) {
+ case 1:
+- ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
++ ACCESS_ONCE_RW(*(u8 *)ptr) = (u8)val;
+ break;
+ case 2:
+- ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
++ ACCESS_ONCE_RW(*(u16 *)ptr) = (u16)val;
+ break;
+ case 4:
+- ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
++ ACCESS_ONCE_RW(*(u32 *)ptr) = (u32)val;
+ break;
+ case 8:
+- ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
++ ACCESS_ONCE_RW(*(u64 *)ptr) = (u64)val;
+ break;
+ default:
+ BUILD_BUG();
+diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
+index 7642056..bffc904 100644
+--- a/arch/arm64/include/asm/pgalloc.h
++++ b/arch/arm64/include/asm/pgalloc.h
+@@ -46,6 +46,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #endif /* CONFIG_PGTABLE_LEVELS > 2 */
+
+ #if CONFIG_PGTABLE_LEVELS > 3
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 07e1ba44..ec8cbbb 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -99,6 +99,7 @@ static inline void set_fs(mm_segment_t fs)
+ flag; \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __range_ok(addr, size)
+ #define user_addr_max get_fs
+
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index d16a1ce..a5acc60 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -134,7 +134,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ if (!freed)
+- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
++ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
+ }
+
+ static void *__dma_alloc(struct device *dev, size_t size,
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index c3a58a1..78fbf54 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index 0388ece..87c8df1 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index 479330b..53717a8 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -2,9 +2,9 @@
+ #define __ASM_AVR32_KMAP_TYPES_H
+
+ #ifdef CONFIG_DEBUG_HIGHMEM
+-# define KM_TYPE_NR 29
++# define KM_TYPE_NR 30
+ #else
+-# define KM_TYPE_NR 14
++# define KM_TYPE_NR 15
+ #endif
+
+ #endif /* __ASM_AVR32_KMAP_TYPES_H */
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index c035339..e1fa594 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "?? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -178,6 +195,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee..15b6f8d 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -18,6 +18,7 @@ config DEBUG_VERBOSE
+ config DEBUG_MMRS
+ tristate "Generate Blackfin MMR tree"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ help
+ Create a tree of Blackfin MMRs via the debugfs tree. If
+ you enable this, you will find all MMRs laid out in the
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 568885a..f8008df 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,6 +7,7 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
+ #include <linux/linkage.h> /* for asmlinkage */
+
+ /*
+@@ -14,7 +15,7 @@
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index 7caf25d..ee65ac5 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
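The cache.h edits in this patch (avr32, blackfin and cris above, frv, hexagon and ia64 further down) all redefine L1_CACHE_BYTES as _AC(1,UL) << L1_CACHE_SHIFT. _AC comes from <linux/const.h>, pulled in by the added includes: it appends the UL suffix only when the header is compiled as C and drops it for assembly, so one definition serves both and the constant has unsigned long type in C expressions. A small hedged illustration with made-up names:

    #include <linux/const.h>

    #define EXAMPLE_SHIFT 5
    /* expands to (1UL << 5) in C code, and to (1 << 5) when included from .S files,
     * where a UL suffix would be a syntax error */
    #define EXAMPLE_BYTES (_AC(1,UL) << EXAMPLE_SHIFT)

    /* the UL-typed constant keeps mask arithmetic in unsigned long rather than int */
    #define EXAMPLE_MASK  (~(EXAMPLE_BYTES - 1))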
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 102190a..5334cea 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -181,6 +181,16 @@ static inline void atomic64_dec(atomic64_t *v)
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 2797163..c2a401df9 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+ #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index 43901f2..0d8b865 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -2,6 +2,6 @@
+ #ifndef _ASM_KMAP_TYPES_H
+ #define _ASM_KMAP_TYPES_H
+
+-#define KM_TYPE_NR 17
++#define KM_TYPE_NR 18
+
+ #endif
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 836f147..4cf23f5 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ goto success;
+ }
+
+@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ info.high_limit = (current->mm->start_stack - 0x00200000);
+ info.align_mask = 0;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto success;
+diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
+index 69952c18..4fa2908 100644
+--- a/arch/hexagon/include/asm/cache.h
++++ b/arch/hexagon/include/asm/cache.h
+@@ -21,9 +21,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
++
+ /* Bytes per L1 cache line */
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
+index 42a91a7..29d446e 100644
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
+ config KEXEC
+ bool "kexec system call"
+ depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
+index 970d0bd..e750b9b 100644
+--- a/arch/ia64/Makefile
++++ b/arch/ia64/Makefile
+@@ -98,5 +98,6 @@ endef
+ archprepare: make_nr_irqs_h FORCE
+ PHONY += make_nr_irqs_h FORCE
+
++make_nr_irqs_h: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ make_nr_irqs_h: FORCE
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 0bf0350..2ad1957 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -193,4 +193,14 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* _ASM_IA64_ATOMIC_H */
+diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
+index 843ba43..fa118fb 100644
+--- a/arch/ia64/include/asm/barrier.h
++++ b/arch/ia64/include/asm/barrier.h
+@@ -66,7 +66,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index 988254a..e1ee885 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 5a83c5c..4d7f553 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -42,6 +42,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index f5e70e9..624fad5 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 9f3ed9e..c99b418 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -139,6 +139,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index 45698cd..e8e2dbc 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 4f3fb6cc..254055e 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -70,6 +70,7 @@
+ && ((segment).seg == KERNEL_DS.seg \
+ || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
+ })
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
+
+ /*
+@@ -241,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(from, count, true);
++
+ return __copy_user(to, (__force void __user *) from, count);
+ }
+
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(to, count, false);
++
+ return __copy_user((__force void __user *) to, from, count);
+ }
+
+@@ -256,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+@@ -267,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void *__cu_to = (to); \
+ const void __user *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_to, __cu_len, false); \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index b15933c..098b1c8 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -484,15 +484,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -675,7 +699,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -810,15 +841,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index c39c3cd..3c77738 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block __refdata palinfo_cpu_notifier =
++static struct notifier_block palinfo_cpu_notifier =
+ {
+ .notifier_call = palinfo_cpu_callback,
+ .priority = 0,
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 41e33f8..65180b2a 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ unsigned long align_mask = 0;
+ struct mm_struct *mm = current->mm;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index dc506b0..39baade 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -171,7 +171,7 @@ SECTIONS {
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ /*
+ * ensure percpu data fits
+ * into percpu page size
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 70b40d1..01a9a28 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ # define VM_READ_BIT 0
+ # define VM_WRITE_BIT 1
+ # define VM_EXEC_BIT 2
+@@ -151,8 +168,21 @@ retry:
+ if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+ goto bad_area;
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index f50d4b3..c7975ee 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -138,6 +138,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -161,6 +162,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 97e48b0..fc59c36 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -119,6 +119,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+@@ -279,7 +292,7 @@ static int __init gate_vma_init(void)
+ gate_vma.vm_start = FIXADDR_USER_START;
+ gate_vma.vm_end = FIXADDR_USER_END;
+ gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
+- gate_vma.vm_page_prot = __P101;
++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
+
+ return 0;
+ }
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee98..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index 0395c51..5f26031 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
+index 5a696e5..070490d 100644
+--- a/arch/metag/include/asm/barrier.h
++++ b/arch/metag/include/asm/barrier.h
+@@ -90,7 +90,7 @@ static inline void fence(void)
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
+index 53f0f6c..2dc07fd 100644
+--- a/arch/metag/mm/hugetlbpage.c
++++ b/arch/metag/mm/hugetlbpage.c
+@@ -189,6 +189,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & HUGEPT_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index 4efe96a..60e8699 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 5
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index 199a835..822b487 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2591,6 +2591,7 @@ source "kernel/Kconfig.preempt"
+
+ config KEXEC
+ bool "Kexec system call"
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
+index d8960d4..77dbd31 100644
+--- a/arch/mips/cavium-octeon/dma-octeon.c
++++ b/arch/mips/cavium-octeon/dma-octeon.c
+@@ -199,7 +199,7 @@ static void octeon_dma_free_coherent(struct device *dev, size_t size,
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
++ swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
+ }
+
+ static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 26d4363..3c9a82e 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -22,15 +22,39 @@
+ #include <asm/cmpxchg.h>
+ #include <asm/war.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
++#ifdef CONFIG_64BIT
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .dword " #from ", " #to"\n" \
++" .previous\n"
++#else
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++" .word " #from ", " #to"\n" \
++" .previous\n"
++#endif
++
+ /*
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+-#define atomic_read(v) ACCESS_ONCE((v)->counter)
++static inline int atomic_read(const atomic_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
++
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
+
+ /*
+ * atomic_set - set atomic variable
+@@ -39,47 +63,77 @@
+ *
+ * Atomically sets the value of @v to @i.
+ */
+-#define atomic_set(v, i) ((v)->counter = (i))
++static inline void atomic_set(atomic_t *v, int i)
++{
++ v->counter = i;
++}
+
+-#define ATOMIC_OP(op, c_op, asm_op) \
+-static __inline__ void atomic_##op(int i, atomic_t * v) \
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __OVERFLOW_POST \
++ " b 4f \n" \
++ " .set noreorder \n" \
++ "3: b 5f \n" \
++ " move %0, %1 \n" \
++ " .set reorder \n"
++#define __OVERFLOW_EXTABLE \
++ "3:\n" \
++ _ASM_EXTABLE(2b, 3b)
++#else
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, extable) \
++static inline void atomic_##op##suffix(int i, atomic##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %0, %1 # atomic_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set mips3 \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " sc %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %0, %1 # atomic_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " sc %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %0, %1 # atomic_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " sc %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+r" (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline int atomic_##op##_return##suffix(int i, atomic##suffix##_t * v) \
+ { \
+ int result; \
+ \
+@@ -89,12 +143,15 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ " .set mips3 \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix"\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -102,26 +159,33 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " ll %1, %2 # atomic_" #op "_return \n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " sc %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!result)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: ll %1, %2 # atomic_" #op "_return" #suffix "\n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " sc %0, %2 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " lw %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sw %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+@@ -130,16 +194,21 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC_OPS(op, c_op, asm_op) \
+- ATOMIC_OP(op, c_op, asm_op) \
+- ATOMIC_OP_RETURN(op, c_op, asm_op)
++#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+-ATOMIC_OPS(add, +=, addu)
+-ATOMIC_OPS(sub, -=, subu)
++#define ATOMIC_OPS(op, asm_op) \
++ ATOMIC_OP(op, asm_op) \
++ ATOMIC_OP_RETURN(op, asm_op)
++
++ATOMIC_OPS(add, add)
++ATOMIC_OPS(sub, sub)
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ /*
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+@@ -149,7 +218,7 @@ ATOMIC_OPS(sub, -=, subu)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+ int result;
+
+@@ -159,7 +228,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ int temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
+ " subu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -208,8 +277,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ return result;
+ }
+
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++ int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+@@ -237,6 +324,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_sub_and_test - subtract value from variable and test result
+@@ -258,6 +349,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+
+ /*
+ * atomic_dec_and_test - decrement by 1 and test
+@@ -282,6 +377,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+
+ /*
+ * atomic_dec - decrement and test
+@@ -290,6 +389,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic_dec(v) atomic_sub(1, (v))
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ /*
+ * atomic_add_negative - add and test if negative
+@@ -311,54 +414,77 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * @v: pointer of type atomic64_t
+ *
+ */
+-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
+
+ /*
+ * atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+-#define atomic64_set(v, i) ((v)->counter = (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++ v->counter = i;
++}
+
+-#define ATOMIC64_OP(op, c_op, asm_op) \
+-static __inline__ void atomic64_##op(long i, atomic64_t * v) \
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++#define __ATOMIC64_OP(op, suffix, asm_op, extable) \
++static inline void atomic64_##op##suffix(long i, atomic64##suffix##_t * v) \
+ { \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
+- "1: lld %0, %1 # atomic64_" #op " \n" \
+- " " #asm_op " %0, %2 \n" \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
+ " scd %0, %1 \n" \
+ " beqzl %0, 1b \n" \
++ extable \
+ " .set mips0 \n" \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %0, %1 # atomic64_" #op "\n" \
+- " " #asm_op " %0, %2 \n" \
+- " scd %0, %1 \n" \
+- " .set mips0 \n" \
+- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i)); \
+- } while (unlikely(!temp)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %0, %1 # atomic64_" #op #suffix "\n" \
++ "2: " #asm_op " %0, %2 \n" \
++ " scd %0, %1 \n" \
++ " beqz %0, 1b \n" \
++ extable \
++ " .set mips0 \n" \
++ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- v->counter c_op i; \
++ __asm__ __volatile__( \
++ "2: " #asm_op " %0, %1 \n" \
++ extable \
++ : "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ }
+
+-#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, _unchecked, asm_op##u, ) \
++ __ATOMIC64_OP(op, , asm_op, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op, extable) \
++static inline long atomic64_##op##_return##suffix(long i, atomic64##suffix##_t * v)\
+ { \
+ long result; \
+ \
+@@ -368,12 +494,15 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+- " .set arch=r4000 \n" \
++ " .set mips3 \n" \
+ "1: lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ "2: " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+- " " #asm_op " %0, %1, %3 \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+@@ -381,27 +510,35 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+- do { \
+- __asm__ __volatile__( \
+- " .set "MIPS_ISA_LEVEL" \n" \
+- " lld %1, %2 # atomic64_" #op "_return\n" \
+- " " #asm_op " %0, %1, %3 \n" \
+- " scd %0, %2 \n" \
+- " .set mips0 \n" \
+- : "=&r" (result), "=&r" (temp), \
+- "=" GCC_OFF_SMALL_ASM() (v->counter) \
+- : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+- : "memory"); \
+- } while (unlikely(!result)); \
++ __asm__ __volatile__( \
++ " .set "MIPS_ISA_LEVEL" \n" \
++ "1: lld %1, %2 # atomic64_" #op "_return" #suffix "\n"\
++ "2: " #asm_op " %0, %1, %3 \n" \
++ " scd %0, %2 \n" \
++ " beqz %0, 1b \n" \
++ post_op \
++ extable \
++ "4: " #asm_op " %0, %1, %3 \n" \
++ "5: \n" \
++ " .set mips0 \n" \
++ : "=&r" (result), "=&r" (temp), \
++ "=" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
++ : "memory"); \
+ \
+ result = temp; result c_op i; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+- result = v->counter; \
+- result c_op i; \
+- v->counter = result; \
++ __asm__ __volatile__( \
++ " ld %0, %1 \n" \
++ "2: " #asm_op " %0, %1, %2 \n" \
++ " sd %0, %1 \n" \
++ "3: \n" \
++ extable \
++ : "=&r" (result), "+" GCC_OFF_SMALL_ASM() (v->counter) \
++ : "Ir" (i)); \
+ raw_local_irq_restore(flags); \
+ } \
+ \
+@@ -410,16 +547,23 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+ return result; \
+ }
+
+-#define ATOMIC64_OPS(op, c_op, asm_op) \
+- ATOMIC64_OP(op, c_op, asm_op) \
+- ATOMIC64_OP_RETURN(op, c_op, asm_op)
++#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, _unchecked, asm_op##u, , ) \
++ __ATOMIC64_OP_RETURN(op, , asm_op, __OVERFLOW_POST, __OVERFLOW_EXTABLE)
+
+-ATOMIC64_OPS(add, +=, daddu)
+-ATOMIC64_OPS(sub, -=, dsubu)
++#define ATOMIC64_OPS(op, asm_op) \
++ ATOMIC64_OP(op, asm_op) \
++ ATOMIC64_OP_RETURN(op, asm_op)
++
++ATOMIC64_OPS(add, dadd)
++ATOMIC64_OPS(sub, dsub)
+
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
+
+ /*
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic
+@@ -430,7 +574,7 @@ ATOMIC64_OPS(sub, -=, dsubu)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+ long result;
+
+@@ -440,7 +584,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ long temp;
+
+ __asm__ __volatile__(
+- " .set arch=r4000 \n"
++ " .set "MIPS_ISA_LEVEL" \n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+@@ -489,9 +633,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ return result;
+ }
+
+-#define atomic64_cmpxchg(v, o, n) \
+- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
+
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+@@ -521,6 +682,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+
+ /*
+ * atomic64_sub_and_test - subtract value from variable and test result
+@@ -542,6 +704,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * other cases.
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic64_dec_and_test - decrement by 1 and test
+@@ -566,6 +729,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+
+ /*
+ * atomic64_dec - decrement and test
+@@ -574,6 +738,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
+
+ /*
+ * atomic64_add_negative - add and test if negative
+diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
+index 7ecba84..21774af 100644
+--- a/arch/mips/include/asm/barrier.h
++++ b/arch/mips/include/asm/barrier.h
+@@ -133,7 +133,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index b4db69f..8f3b093 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index f19e890..a4f8177 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -417,6 +417,13 @@ extern const char *__elf_platform;
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+ struct linux_binprm;
+ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
+index c1f6afa..38cc6e9 100644
+--- a/arch/mips/include/asm/exec.h
++++ b/arch/mips/include/asm/exec.h
+@@ -12,6 +12,6 @@
+ #ifndef _ASM_EXEC_H
+ #define _ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_EXEC_H */
+diff --git a/arch/mips/include/asm/hw_irq.h b/arch/mips/include/asm/hw_irq.h
+index 9e8ef59..1139d6b 100644
+--- a/arch/mips/include/asm/hw_irq.h
++++ b/arch/mips/include/asm/hw_irq.h
+@@ -10,7 +10,7 @@
+
+ #include <linux/atomic.h>
+
+-extern atomic_t irq_err_count;
++extern atomic_unchecked_t irq_err_count;
+
+ /*
+ * interrupt-retrigger: NOP for now. This may not be appropriate for all
+diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
+index 8feaed6..1bd8a64 100644
+--- a/arch/mips/include/asm/local.h
++++ b/arch/mips/include/asm/local.h
+@@ -13,15 +13,25 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i, l) atomic_long_add((i), (&(l)->a))
++#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
+ #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
++#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ /*
+ * Same as above, but return the result value
+@@ -71,6 +81,51 @@ static __inline__ long local_add_return(long i, local_t * l)
+ return result;
+ }
+
++static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
++{
++ unsigned long result;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqzl %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ unsigned long temp;
++
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1:" __LL "%1, %2 # local_add_return \n"
++ " addu %0, %1, %3 \n"
++ __SC "%0, %2 \n"
++ " beqz %0, 1b \n"
++ " addu %0, %1, %3 \n"
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
++ : "Ir" (i), "m" (l->a.counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ result = l->a.counter;
++ result += i;
++ l->a.counter = result;
++ local_irq_restore(flags);
++ }
++
++ return result;
++}
++
+ static __inline__ long local_sub_return(long i, local_t * l)
+ {
+ unsigned long result;
+@@ -118,6 +173,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
+
+ #define local_cmpxchg(l, o, n) \
+ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
+
+ /**
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index 89dd7fe..a123c97 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -118,7 +118,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index b336037..5b874cc 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index ae85694..4cdbba8 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -20,6 +20,9 @@
+ #include <asm/io.h>
+ #include <asm/pgtable-bits.h>
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ struct mm_struct;
+ struct vm_area_struct;
+
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 9c0014e..5101ef5 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -100,6 +100,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_SECCOMP 4 /* secure computing */
+ #define TIF_NOTIFY_RESUME 5 /* callback before returning to user */
+ #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID 10 /* update credentials on syscall entry/exit */
++
+ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+ #define TIF_NOHZ 19 /* in adaptive nohz mode */
+@@ -135,14 +138,16 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_USEDMSA (1<<TIF_USEDMSA)
+ #define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | \
+- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
++ _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+- _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK \
+@@ -150,7 +155,7 @@ static inline struct thread_info *current_thread_info(void)
+ /* work to do on any return to u-space */
+ #define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
+ _TIF_WORK_SYSCALL_EXIT | \
+- _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /*
+ * We stash processor id into a COP0 register to retrieve it fast
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index 5305d69..1da2bf5 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -146,6 +146,7 @@ static inline bool eva_kernel_access(void)
+ __ok == 0; \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ likely(__access_ok((addr), (size), __access_mask))
+
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 1188e00..41cf144 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index 9287678..f870e47 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -70,6 +70,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ #include <linux/module.h>
+diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
+index 74f6752..f3d7a47 100644
+--- a/arch/mips/kernel/i8259.c
++++ b/arch/mips/kernel/i8259.c
+@@ -205,7 +205,7 @@ spurious_8259A_irq:
+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
+index 44a1f79..2bd6aa3 100644
+--- a/arch/mips/kernel/irq-gt641xx.c
++++ b/arch/mips/kernel/irq-gt641xx.c
+@@ -110,7 +110,7 @@ void gt641xx_irq_dispatch(void)
+ }
+ }
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init gt641xx_irq_init(void)
+diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
+index 8eb5af8..2baf465 100644
+--- a/arch/mips/kernel/irq.c
++++ b/arch/mips/kernel/irq.c
+@@ -34,17 +34,17 @@ void ack_bad_irq(unsigned int irq)
+ printk("unexpected IRQ # %d\n", irq);
+ }
+
+-atomic_t irq_err_count;
++atomic_unchecked_t irq_err_count;
+
+ int arch_show_interrupts(struct seq_file *p, int prec)
+ {
+- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
++ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
+ return 0;
+ }
+
+ asmlinkage void spurious_interrupt(void)
+ {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ }
+
+ void __init init_IRQ(void)
+@@ -58,6 +58,8 @@ void __init init_IRQ(void)
+ }
+
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
++
++extern void gr_handle_kernel_exploit(void);
+ static inline void check_stack_overflow(void)
+ {
+ unsigned long sp;
+@@ -73,6 +75,7 @@ static inline void check_stack_overflow(void)
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ }
+ #else
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index 0614717..002fa43 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -172,7 +172,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+- ACCESS_ONCE(*nc_core_ready_count) = 0;
++ ACCESS_ONCE_RW(*nc_core_ready_count) = 0;
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index f2975d4..f61d355 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -541,18 +541,6 @@ out:
+ return pc;
+ }
+
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+-
+ static void arch_dump_stack(void *info)
+ {
+ struct pt_regs *regs;
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index e933a30..0d02625 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -785,6 +785,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+@@ -803,6 +807,11 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+ tracehook_report_syscall_entry(regs))
+ ret = -1;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
+index 2242bdd..b284048 100644
+--- a/arch/mips/kernel/sync-r4k.c
++++ b/arch/mips/kernel/sync-r4k.c
+@@ -18,8 +18,8 @@
+ #include <asm/mipsregs.h>
+
+ static atomic_t count_start_flag = ATOMIC_INIT(0);
+-static atomic_t count_count_start = ATOMIC_INIT(0);
+-static atomic_t count_count_stop = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
++static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
+ static atomic_t count_reference = ATOMIC_INIT(0);
+
+ #define COUNTON 100
+@@ -58,13 +58,13 @@ void synchronise_count_master(int cpu)
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+- while (atomic_read(&count_count_start) != 1)
++ while (atomic_read_unchecked(&count_count_start) != 1)
+ mb();
+- atomic_set(&count_count_stop, 0);
++ atomic_set_unchecked(&count_count_stop, 0);
+ smp_wmb();
+
+ /* this lets the slaves write their count register */
+- atomic_inc(&count_count_start);
++ atomic_inc_unchecked(&count_count_start);
+
+ /*
+ * Everyone initialises count in the last loop:
+@@ -75,11 +75,11 @@ void synchronise_count_master(int cpu)
+ /*
+ * Wait for all slaves to leave the synchronization point:
+ */
+- while (atomic_read(&count_count_stop) != 1)
++ while (atomic_read_unchecked(&count_count_stop) != 1)
+ mb();
+- atomic_set(&count_count_start, 0);
++ atomic_set_unchecked(&count_count_start, 0);
+ smp_wmb();
+- atomic_inc(&count_count_stop);
++ atomic_inc_unchecked(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+@@ -112,8 +112,8 @@ void synchronise_count_slave(int cpu)
+ initcount = atomic_read(&count_reference);
+
+ for (i = 0; i < NR_LOOPS; i++) {
+- atomic_inc(&count_count_start);
+- while (atomic_read(&count_count_start) != 2)
++ atomic_inc_unchecked(&count_count_start);
++ while (atomic_read_unchecked(&count_count_start) != 2)
+ mb();
+
+ /*
+@@ -122,8 +122,8 @@ void synchronise_count_slave(int cpu)
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+- atomic_inc(&count_count_stop);
+- while (atomic_read(&count_count_stop) != 2)
++ atomic_inc_unchecked(&count_count_stop);
++ while (atomic_read_unchecked(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 8ea28e6..c8873d5 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -697,7 +697,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
+ siginfo_t info;
+
+ prev_state = exception_enter();
+- die_if_kernel("Integer overflow", regs);
++ if (unlikely(!user_mode(regs))) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fixup_exception(regs)) {
++ pax_report_refcount_overflow(regs);
++ exception_exit(prev_state);
++ return;
++ }
++#endif
++
++ die("Integer overflow", regs);
++ }
+
+ info.si_code = FPE_INTOVF;
+ info.si_signo = SIGFPE;
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index cd4c129..290c518 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -1016,7 +1016,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+ return r;
+ }
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ if (kvm_mips_callbacks) {
+ kvm_err("kvm: module already exists\n");
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index 852a41c..75b9d38 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -31,6 +31,23 @@
+
+ int show_unhandled_signals = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -207,6 +224,14 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
++ pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ tsk->thread.cp0_badvaddr = address;
+ tsk->thread.error_code = write;
+ if (show_unhandled_signals &&
+diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
+index 5c81fdd..db158d3 100644
+--- a/arch/mips/mm/mmap.c
++++ b/arch/mips/mm/mmap.c
+@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ do_color_align = 1;
+
+ /* requesting a specific address */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+@@ -160,45 +166,34 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = get_random_int();
+-
+- rnd = rnd << PAGE_SHIFT;
+- /* 8MB for 32bit, 256MB for 64bit */
+- if (TASK_IS_32BIT_ADDR)
+- rnd = rnd & 0x7ffffful;
+- else
+- rnd = rnd & 0xffffffful;
+-
+- return rnd;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+ int __virt_addr_valid(const volatile void *kaddr)
+ {
+ return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
+index dabf417..0be1d6d 100644
+--- a/arch/mips/net/bpf_jit_asm.S
++++ b/arch/mips/net/bpf_jit_asm.S
+@@ -62,7 +62,9 @@ sk_load_word_positive:
+ is_offset_in_header(4, word)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
++ .set reorder
+ lw $r_A, 0(t1)
++ .set noreorder
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
+ wsbh t0, $r_A
+@@ -90,7 +92,9 @@ sk_load_half_positive:
+ is_offset_in_header(2, half)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
++ .set reorder
+ lh $r_A, 0(t1)
++ .set noreorder
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ # if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
+ wsbh t0, $r_A
+diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
+index a2358b4..7cead4f 100644
+--- a/arch/mips/sgi-ip27/ip27-nmi.c
++++ b/arch/mips/sgi-ip27/ip27-nmi.c
+@@ -187,9 +187,9 @@ void
+ cont_nmi_dump(void)
+ {
+ #ifndef REAL_NMI_SIGNAL
+- static atomic_t nmied_cpus = ATOMIC_INIT(0);
++ static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
+
+- atomic_inc(&nmied_cpus);
++ atomic_inc_unchecked(&nmied_cpus);
+ #endif
+ /*
+ * Only allow 1 cpu to proceed
+@@ -233,7 +233,7 @@ cont_nmi_dump(void)
+ udelay(10000);
+ }
+ #else
+- while (atomic_read(&nmied_cpus) != num_online_cpus());
++ while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
+ #endif
+
+ /*
+diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
+index a046b30..6799527 100644
+--- a/arch/mips/sni/rm200.c
++++ b/arch/mips/sni/rm200.c
+@@ -270,7 +270,7 @@ spurious_8259A_irq:
+ "spurious RM200 8259A interrupt: IRQ%d.\n", irq);
+ spurious_irq_mask |= irqmask;
+ }
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ /*
+ * Theoretically we do not have to handle this IRQ,
+ * but in Linux this does not cause problems and is
+diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
+index 41e873b..34d33a7 100644
+--- a/arch/mips/vr41xx/common/icu.c
++++ b/arch/mips/vr41xx/common/icu.c
+@@ -653,7 +653,7 @@ static int icu_get_irq(unsigned int irq)
+
+ printk(KERN_ERR "spurious ICU interrupt: %04x,%04x\n", pend1, pend2);
+
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ return -1;
+ }
+diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
+index ae0e4ee..e8f0692 100644
+--- a/arch/mips/vr41xx/common/irq.c
++++ b/arch/mips/vr41xx/common/irq.c
+@@ -64,7 +64,7 @@ static void irq_dispatch(unsigned int irq)
+ irq_cascade_t *cascade;
+
+ if (irq >= NR_IRQS) {
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ return;
+ }
+
+@@ -84,7 +84,7 @@ static void irq_dispatch(unsigned int irq)
+ ret = cascade->get_irq(irq);
+ irq = ret;
+ if (ret < 0)
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+ else
+ irq_dispatch(irq);
+ if (!irqd_irq_disabled(idata) && chip->irq_unmask)
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index 967d144..db12197 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
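/*
 * Note on the repeated L1_CACHE_BYTES rework in the cache.h hunks above and
 * below: deriving the value as (_AC(1,UL) << L1_CACHE_SHIFT) keeps the
 * constant usable from both assembly and C.  _AC() drops the UL suffix under
 * __ASSEMBLY__ (the assembler rejects "1UL") and appends it otherwise, so C
 * code sees an unsigned long and mask arithmetic such as ~(L1_CACHE_BYTES - 1)
 * is not bitten by int promotion.  A minimal sketch of what <linux/const.h>
 * provides, simplified from the real header (which also defines _AT()):
 */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	4
#define L1_CACHE_BYTES	(_AC(1, UL) << L1_CACHE_SHIFT)	/* 16UL in C, plain 16 in .S */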
+diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+index bcb5df2..84fabd2 100644
+--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
++++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
+@@ -16,13 +16,15 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * L1 cache
+ */
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
+-#define L1_CACHE_BYTES 32 /* bytes per entry */
+ #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
+index 4ce7a01..449202a 100644
+--- a/arch/openrisc/include/asm/cache.h
++++ b/arch/openrisc/include/asm/cache.h
+@@ -19,11 +19,13 @@
+ #ifndef __ASM_OPENRISC_CACHE_H
+ #define __ASM_OPENRISC_CACHE_H
+
++#include <linux/const.h>
++
+ /* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+-#define L1_CACHE_BYTES 16
+ #define L1_CACHE_SHIFT 4
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* __ASM_OPENRISC_CACHE_H */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 226f8ca9..9d9b87d 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -273,6 +273,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
+ return dec;
+ }
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !CONFIG_64BIT */
+
+
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 47f11c7..3420df2 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+@@ -15,13 +16,13 @@
+ * just ruin performance.
+ */
+ #ifdef CONFIG_PA20
+-#define L1_CACHE_BYTES 64
+ #define L1_CACHE_SHIFT 6
+ #else
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 78c9fd3..42fa66a 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index 3edbb9f..08fef28 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+@@ -97,6 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index f93c4a4..cfd5663 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -231,6 +231,17 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0abdd4c..1af92f0 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -243,10 +243,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+ int ret = -EFAULT;
+
+- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
++ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
+ ret = __copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index 3c63a82..b1d6ee9 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -98,16 +98,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -367,13 +389,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
+
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -391,7 +413,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -409,7 +431,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -427,7 +449,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -839,7 +861,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 5aba01a..47cdd5a 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -92,6 +92,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > task_size)
+ return -ENOMEM;
+@@ -109,6 +110,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ goto found_addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -127,6 +132,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.high_limit = mmap_upper_limit();
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ found_addr:
+@@ -146,6 +152,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long addr = addr0;
+ int do_color_align, last_mmap;
+ struct vm_unmapped_area_info info;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ #ifdef CONFIG_64BIT
+ /* This should only ever run for 32-bit processes. */
+@@ -170,6 +177,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ }
+
+ /* requesting a specific address */
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align && last_mmap)
+ addr = COLOR_ALIGN(addr, last_mmap, pgoff);
+@@ -187,6 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
+ info.align_offset = shared_align_offset(last_mmap, pgoff);
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+ if (!(addr & ~PAGE_MASK))
+ goto found_addr;
+@@ -252,6 +264,13 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_upper_limit();
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP) {
++ mm->mmap_legacy_base += mm->delta_mmap;
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++ }
++#endif
++
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mm->mmap_legacy_base;
+ mm->get_unmapped_area = arch_get_unmapped_area;
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index b99b39f..e3915ae 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 15503ad..4b1b8b6 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -16,6 +16,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/uaccess.h>
++#include <linux/unistd.h>
+
+ #include <asm/traps.h>
+
+@@ -50,7 +51,7 @@ int show_unhandled_signals = 1;
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -136,6 +137,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -234,8 +345,33 @@ retry:
+
+ good_area:
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 5ef2711..21be2c3 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -415,6 +415,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
+ config KEXEC
+ bool "kexec system call"
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
++ depends on !GRKERNSEC_KMEM
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 512d278..d31fadd 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -12,6 +12,11 @@
+
+ #define ATOMIC_INIT(i) { (i) }
+
++#define _ASM_EXTABLE(from, to) \
++" .section __ex_table,\"a\"\n" \
++ PPC_LONG" " #from ", " #to"\n" \
++" .previous\n"
++
+ static __inline__ int atomic_read(const atomic_t *v)
+ {
+ int t;
+@@ -21,39 +26,80 @@ static __inline__ int atomic_read(const atomic_t *v)
+ return t;
+ }
+
++static __inline__ int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ int t;
++
++ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic_set(atomic_t *v, int i)
+ {
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC_OP(op, asm_op) \
+-static __inline__ void atomic_##op(int a, atomic_t *v) \
++static __inline__ void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##o.
++#define __OVERFLOW_PRE \
++ " mcrxr cr0\n"
++#define __OVERFLOW_POST \
++ " bf 4*cr0+so, 3f\n" \
++ "2: .long 0x00c00b00\n" \
++ "3:\n"
++#define __OVERFLOW_EXTABLE \
++ "\n4:\n" \
++ _ASM_EXTABLE(2b, 4b)
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_PRE
++#define __OVERFLOW_POST
++#define __OVERFLOW_EXTABLE
++#endif
++
++#define __ATOMIC_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic_##op##suffix(int a, atomic##suffix##_t *v) \
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+-"1: lwarx %0,0,%3 # atomic_" #op "\n" \
++"1: lwarx %0,0,%3 # atomic_" #op #suffix "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ PPC405_ERR77(0,%3) \
+ " stwcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ } \
+
+-#define ATOMIC_OP_RETURN(op, asm_op) \
+-static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
++#define ATOMIC_OP(op, asm_op) __ATOMIC_OP(op, , , asm_op, , ) \
++ __ATOMIC_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline int atomic_##op##_return##suffix(int a, atomic##suffix##_t *v)\
+ { \
+ int t; \
+ \
+ __asm__ __volatile__( \
+ PPC_ATOMIC_ENTRY_BARRIER \
+-"1: lwarx %0,0,%2 # atomic_" #op "_return\n" \
++"1: lwarx %0,0,%2 # atomic_" #op "_return" #suffix "\n" \
++ pre_op \
+ #asm_op " %0,%1,%0\n" \
++ post_op \
+ PPC405_ERR77(0,%2) \
+ " stwcx. %0,0,%2 \n" \
+ " bne- 1b\n" \
++ extable \
+ PPC_ATOMIC_EXIT_BARRIER \
+ : "=&r" (t) \
+ : "r" (a), "r" (&v->counter) \
+@@ -62,6 +108,9 @@ static __inline__ int atomic_##op##_return(int a, atomic_t *v) \
+ return t; \
+ }
+
++#define ATOMIC_OP_RETURN(op, asm_op) __ATOMIC_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC_OPS(op, asm_op) ATOMIC_OP(op, asm_op) ATOMIC_OP_RETURN(op, asm_op)
+
+ ATOMIC_OPS(add, add)
+@@ -69,42 +118,29 @@ ATOMIC_OPS(sub, subf)
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic_inc(atomic_t *v)
+-{
+- int t;
++/*
++ * atomic_inc - increment atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_return(v) atomic_add_return(1, (v))
+
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_inc\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%2)
+-" stwcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
+ }
+
+-static __inline__ int atomic_inc_return(atomic_t *v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: lwarx %0,0,%1 # atomic_inc_return\n\
+- addic %0,%0,1\n"
+- PPC405_ERR77(0,%1)
+-" stwcx. %0,0,%1 \n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ return atomic_add_return_unchecked(1, v);
+ }
+
+ /*
+@@ -117,43 +153,38 @@ static __inline__ int atomic_inc_return(atomic_t *v)
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
+-static __inline__ void atomic_dec(atomic_t *v)
++static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+-"1: lwarx %0,0,%2 # atomic_dec\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%2)\
+-" stwcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++ return atomic_add_return_unchecked(1, v) == 0;
+ }
+
+-static __inline__ int atomic_dec_return(atomic_t *v)
++/*
++ * atomic_dec - decrement atomic variable
++ * @v: pointer of type atomic_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_return(v) atomic_sub_return(1, (v))
++
++static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
+ {
+- int t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: lwarx %0,0,%1 # atomic_dec_return\n\
+- addic %0,%0,-1\n"
+- PPC405_ERR77(0,%1)
+-" stwcx. %0,0,%1\n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ atomic_sub_unchecked(1, v);
+ }
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+@@ -171,11 +202,27 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %0,0,%1 # __atomic_add_unless\n\
+ cmpw 0,%0,%3 \n\
+- beq- 2f \n\
+- add %0,%2,%0 \n"
++ beq- 2f \n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%2,%0\n"
++" bf 4*cr0+so, 4f\n"
++"3:.long " "0x00c00b00""\n"
++"4:\n"
++#else
++ "add %0,%2,%0 \n"
++#endif
++
+ PPC405_ERR77(0,%2)
+ " stwcx. %0,0,%1 \n\
+ bne- 1b \n"
++"5:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(3b, 5b)
++#endif
++
+ PPC_ATOMIC_EXIT_BARRIER
+ " subf %0,%2,%0 \n\
+ 2:"
+@@ -248,6 +295,11 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
+ }
+ #define atomic_dec_if_positive atomic_dec_if_positive
+
++#define smp_mb__before_atomic_dec() smp_mb()
++#define smp_mb__after_atomic_dec() smp_mb()
++#define smp_mb__before_atomic_inc() smp_mb()
++#define smp_mb__after_atomic_inc() smp_mb()
++
+ #ifdef __powerpc64__
+
+ #define ATOMIC64_INIT(i) { (i) }
+@@ -261,37 +313,60 @@ static __inline__ long atomic64_read(const atomic64_t *v)
+ return t;
+ }
+
++static __inline__ long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long t;
++
++ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
++
++ return t;
++}
++
+ static __inline__ void atomic64_set(atomic64_t *v, long i)
+ {
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ }
+
+-#define ATOMIC64_OP(op, asm_op) \
+-static __inline__ void atomic64_##op(long a, atomic64_t *v) \
++static __inline__ void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
++}
++
++#define __ATOMIC64_OP(op, suffix, pre_op, asm_op, post_op, extable) \
++static inline void atomic64_##op##suffix(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ "1: ldarx %0,0,%3 # atomic64_" #op "\n" \
++ pre_op \
+ #asm_op " %0,%2,%0\n" \
++ post_op \
+ " stdcx. %0,0,%3 \n" \
+ " bne- 1b\n" \
++ extable \
+ : "=&r" (t), "+m" (v->counter) \
+ : "r" (a), "r" (&v->counter) \
+ : "cc"); \
+ }
+
+-#define ATOMIC64_OP_RETURN(op, asm_op) \
+-static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
++#define ATOMIC64_OP(op, asm_op) __ATOMIC64_OP(op, , , asm_op, , ) \
++ __ATOMIC64_OP(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, pre_op, asm_op, post_op, extable)\
++static inline long atomic64_##op##_return##suffix(long a, atomic64##suffix##_t *v)\
+ { \
+ long t; \
+ \
+ __asm__ __volatile__( \
+ PPC_ATOMIC_ENTRY_BARRIER \
+ "1: ldarx %0,0,%2 # atomic64_" #op "_return\n" \
++ pre_op \
+ #asm_op " %0,%1,%0\n" \
++ post_op \
+ " stdcx. %0,0,%2 \n" \
+ " bne- 1b\n" \
++ extable \
+ PPC_ATOMIC_EXIT_BARRIER \
+ : "=&r" (t) \
+ : "r" (a), "r" (&v->counter) \
+@@ -300,6 +375,9 @@ static __inline__ long atomic64_##op##_return(long a, atomic64_t *v) \
+ return t; \
+ }
+
++#define ATOMIC64_OP_RETURN(op, asm_op) __ATOMIC64_OP_RETURN(op, , , asm_op, , )\
++ __ATOMIC64_OP_RETURN(op, _unchecked, __OVERFLOW_PRE, __REFCOUNT_OP(asm_op), __OVERFLOW_POST, __OVERFLOW_EXTABLE)
++
+ #define ATOMIC64_OPS(op, asm_op) ATOMIC64_OP(op, asm_op) ATOMIC64_OP_RETURN(op, asm_op)
+
+ ATOMIC64_OPS(add, add)
+@@ -307,40 +385,33 @@ ATOMIC64_OPS(sub, subf)
+
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_EXTABLE
++#undef __OVERFLOW_POST
++#undef __OVERFLOW_PRE
++#undef __REFCOUNT_OP
+
+ #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+-static __inline__ void atomic64_inc(atomic64_t *v)
+-{
+- long t;
++/*
++ * atomic64_inc - increment atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically increments @v by 1
++ */
++#define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_inc\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%2 \n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
+ }
+
+-static __inline__ long atomic64_inc_return(atomic64_t *v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # atomic64_inc_return\n\
+- addic %0,%0,1\n\
+- stdcx. %0,0,%1 \n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ return atomic64_add_return_unchecked(1, v);
+ }
+
+ /*
+@@ -353,36 +424,18 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+-static __inline__ void atomic64_dec(atomic64_t *v)
++/*
++ * atomic64_dec - decrement atomic variable
++ * @v: pointer of type atomic64_t
++ *
++ * Atomically decrements @v by 1
++ */
++#define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
++
++static __inline__ void atomic64_dec_unchecked(atomic64_unchecked_t *v)
+ {
+- long t;
+-
+- __asm__ __volatile__(
+-"1: ldarx %0,0,%2 # atomic64_dec\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%2\n\
+- bne- 1b"
+- : "=&r" (t), "+m" (v->counter)
+- : "r" (&v->counter)
+- : "cc", "xer");
+-}
+-
+-static __inline__ long atomic64_dec_return(atomic64_t *v)
+-{
+- long t;
+-
+- __asm__ __volatile__(
+- PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # atomic64_dec_return\n\
+- addic %0,%0,-1\n\
+- stdcx. %0,0,%1\n\
+- bne- 1b"
+- PPC_ATOMIC_EXIT_BARRIER
+- : "=&r" (t)
+- : "r" (&v->counter)
+- : "cc", "xer", "memory");
+-
+- return t;
++ atomic64_sub_unchecked(1, v);
+ }
+
+ #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+@@ -415,6 +468,16 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&(v->counter), old, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&(v->counter), new);
++}
++
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+@@ -430,13 +493,29 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+-"1: ldarx %0,0,%1 # __atomic_add_unless\n\
++"1: ldarx %0,0,%1 # atomic64_add_unless\n\
+ cmpd 0,%0,%3 \n\
+- beq- 2f \n\
+- add %0,%2,%0 \n"
++ beq- 2f \n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%2,%0\n"
++" bf 4*cr0+so, 4f\n"
++"3:.long " "0x00c00b00""\n"
++"4:\n"
++#else
++ "add %0,%2,%0 \n"
++#endif
++
+ " stdcx. %0,0,%1 \n\
+ bne- 1b \n"
+ PPC_ATOMIC_EXIT_BARRIER
++"5:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(3b, 5b)
++#endif
++
+ " subf %0,%2,%0 \n\
+ 2:"
+ : "=&r" (t)
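/*
 * The atomic_*_unchecked()/atomic64_*_unchecked() split introduced above is
 * the heart of PAX_REFCOUNT: the checked operations use the overflow-
 * recording add (__REFCOUNT_OP turns "add" into "addo.") and fall into the
 * invalid 0x00c00b00 opcode when the summary-overflow bit is set, which the
 * exception-table entry converts into a refcount-overflow report instead of
 * a silent wrap; the _unchecked variants keep ordinary wrapping semantics for
 * counters that may legitimately overflow (statistics, error counts).  A
 * rough user-space illustration of just these semantics follows -- it is
 * single threaded, not atomic, and the demo_* names are invented; it is not
 * the kernel's LL/SC implementation.
 */
#include <limits.h>
#include <stdio.h>

/* Checked flavour: detect signed wrap and trap, like the 0x00c00b00 path. */
static int demo_add_return(int a, int *v)
{
	int res;

	if (__builtin_sadd_overflow(*v, a, &res))
		__builtin_trap();	/* kernel path: trap, report, leave the counter unwrapped */
	*v = res;
	return res;
}

/* Unchecked flavour: plain two's-complement wrap is acceptable. */
static int demo_add_return_unchecked(int a, int *v)
{
	*v = (int)((unsigned int)*v + (unsigned int)a);
	return *v;
}

int main(void)
{
	int stat_counter = INT_MAX, refcount = INT_MAX;

	printf("unchecked wraps to %d\n", demo_add_return_unchecked(1, &stat_counter));
	printf("checked never prints: %d\n", demo_add_return(1, &refcount));
	return 0;
}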
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 51ccc72..35de789 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -76,7 +76,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_lwsync(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index 0dc42c5..b80a3a1 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -4,6 +4,7 @@
+ #ifdef __KERNEL__
+
+ #include <asm/reg.h>
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -23,7 +24,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffe..b36c98c 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -30,6 +30,18 @@
+
+ #define ELF_ET_DYN_BASE 0x20000000
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
++
+ #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
+ /*
+diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
+index 8196e9c..d83a9f3 100644
+--- a/arch/powerpc/include/asm/exec.h
++++ b/arch/powerpc/include/asm/exec.h
+@@ -4,6 +4,6 @@
+ #ifndef _ASM_POWERPC_EXEC_H
+ #define _ASM_POWERPC_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_POWERPC_EXEC_H */
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 5acabbd..7ea14fa 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -10,7 +10,7 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
+-#define KM_TYPE_NR 16
++#define KM_TYPE_NR 17
+
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_KMAP_TYPES_H */
+diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
+index b8da913..c02b593 100644
+--- a/arch/powerpc/include/asm/local.h
++++ b/arch/powerpc/include/asm/local.h
+@@ -9,21 +9,65 @@ typedef struct
+ atomic_long_t a;
+ } local_t;
+
++typedef struct
++{
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l,i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l,i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ #define local_add(i,l) atomic_long_add((i),(&(l)->a))
++#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
+ #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
++#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
+ #define local_inc(l) atomic_long_inc(&(l)->a)
++#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
+ #define local_dec(l) atomic_long_dec(&(l)->a)
++#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
+
+ static __inline__ long local_add_return(long a, local_t *l)
+ {
+ long t;
+
+ __asm__ __volatile__(
++"1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addo. %0,%1,%0\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" add %0,%1,%0\n"
++#endif
++
++"3:\n"
++ PPC405_ERR77(0,%2)
++ PPC_STLCX "%0,0,%2 \n\
++ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (t)
++ : "r" (a), "r" (&(l->a.counter))
++ : "cc", "memory");
++
++ return t;
++}
++
++static __inline__ long local_add_return_unchecked(long a, local_unchecked_t *l)
++{
++ long t;
++
++ __asm__ __volatile__(
+ "1:" PPC_LLARX(%0,0,%2,0) " # local_add_return\n\
+ add %0,%1,%0\n"
+ PPC405_ERR77(0,%2)
+@@ -101,6 +145,8 @@ static __inline__ long local_dec_return(local_t *l)
+
+ #define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
+
+ /**
+diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
+index 8565c25..2865190 100644
+--- a/arch/powerpc/include/asm/mman.h
++++ b/arch/powerpc/include/asm/mman.h
+@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
+ }
+ #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
+
+-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
++static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
+ {
+ return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+ }
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index 71294a6..9e40aca 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -227,8 +227,9 @@ extern long long virt_phys_offset;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -256,6 +257,9 @@ extern long long virt_phys_offset;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef CONFIG_PPC_BOOK3S_64
+ /*
+ * Use the top bit of the higher-level page table entries to indicate whether
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index d908a46..3753f71 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -172,15 +172,18 @@ do { \
+ * stack by default, so in the absence of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (is_32bit_task() ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
+index 4b0be20..c15a27d 100644
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -54,6 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -71,6 +72,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+@@ -173,6 +179,7 @@ extern void __tlb_remove_table(void *_table);
+ #endif
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 11a3863..108f194 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <linux/mmdebug.h>
+ #include <linux/mmzone.h>
+diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
+index 62cfb0c..50c6402 100644
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -20,6 +20,7 @@
+ #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index af56b5c..f86f3f6 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -253,6 +253,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
+index 825663c..f9e9134 100644
+--- a/arch/powerpc/include/asm/smp.h
++++ b/arch/powerpc/include/asm/smp.h
+@@ -51,7 +51,7 @@ struct smp_ops_t {
+ int (*cpu_disable)(void);
+ void (*cpu_die)(unsigned int nr);
+ int (*cpu_bootable)(unsigned int nr);
+-};
++} __no_const;
+
+ extern void smp_send_debugger_break(void);
+ extern void start_secondary_resume(void);
+diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
+index 4dbe072..b803275 100644
+--- a/arch/powerpc/include/asm/spinlock.h
++++ b/arch/powerpc/include/asm/spinlock.h
+@@ -204,13 +204,29 @@ static inline long __arch_read_trylock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "1: " PPC_LWARX(%0,0,%1,1) "\n"
+ __DO_SIGN_EXTEND
+-" addic. %0,%0,1\n\
+- ble- 2f\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,1\n"
++#endif
++
++"3:\n"
++ "ble- 4f\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+-"2:" : "=&r" (tmp)
++"4:"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ _ASM_EXTABLE(2b,4b)
++#endif
++
++ : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
+
+@@ -286,11 +302,27 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+-"1: lwarx %0,0,%1\n\
+- addic %0,%0,-1\n"
++"1: lwarx %0,0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" mcrxr cr0\n"
++" addico. %0,%0,-1\n"
++" bf 4*cr0+so, 3f\n"
++"2:.long " "0x00c00b00""\n"
++#else
++" addic. %0,%0,-1\n"
++#endif
++
++"3:\n"
+ PPC405_ERR77(0,%1)
+ " stwcx. %0,0,%1\n\
+ bne- 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 7efee4a..48d47cc 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -101,6 +101,8 @@ static inline struct thread_info *current_thread_info(void)
+ #if defined(CONFIG_PPC64)
+ #define TIF_ELF2ABI 18 /* function descriptors must die! */
+ #endif
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -119,9 +121,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
+ #define _TIF_NOHZ (1<<TIF_NOHZ)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+ #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
+- _TIF_NOHZ)
++ _TIF_NOHZ | _TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 2a8ebae..5643c6f 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -58,6 +58,7 @@
+
+ #endif
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ (__chk_user_ptr(addr), \
+ __access_ok((__force unsigned long)(addr), (size), get_fs()))
+@@ -318,52 +319,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -387,6 +342,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -413,6 +372,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -430,6 +393,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
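/*
 * The copy_from_user()/copy_to_user() rewrite above adds two checks before
 * the actual copy: negative or oversized lengths are rejected up front (an
 * underflowed size would otherwise become a near-2^64-byte copy), and
 * non-constant sizes are passed to check_object_size() so the kernel-side
 * buffer is validated.  A tiny user-space sketch of why the "(long)n < 0"
 * guard matters -- the demo_copy() name is invented and the real code
 * additionally performs access_ok() and runs in kernel context:
 */
#include <stdio.h>
#include <string.h>

/* Stand-in for the hardened copy: refuse lengths with the sign bit set. */
static unsigned long demo_copy(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)
		return n;	/* report every byte as "not copied" */
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[16] = "payload", dst[16];
	unsigned long want = 4, have = 8;
	unsigned long n = want - have;	/* underflows to a huge unsigned value */

	if (demo_copy(dst, src, n))
		puts("bogus length rejected instead of smashing dst");
	else
		puts("copied");
	return 0;
}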
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 12868b1..5155667 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
+ CFLAGS_btext.o += -fPIC
+ endif
+
++CFLAGS_REMOVE_cputable.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++CFLAGS_REMOVE_prom_init.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++CFLAGS_REMOVE_btext.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++CFLAGS_REMOVE_prom.o = $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # Do not trace early boot code
+ CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
+@@ -26,6 +31,8 @@ CFLAGS_REMOVE_ftrace.o = -pg -mno-sched-epilog
+ CFLAGS_REMOVE_time.o = -pg -mno-sched-epilog
+ endif
+
++CFLAGS_REMOVE_prom_init.o += $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++
+ obj-y := cputable.o ptrace.o syscalls.o \
+ irq.o align.o signal_32.o pmc.o vdso.o \
+ process.o systbl.o idle.o \
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 3e68d1c..72a5ee6 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1010,6 +1010,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -1018,8 +1019,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b ret_from_except_lite
+-1: bl save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 0a0399c2..262a2e6 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -1591,10 +1591,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl save_nvgprs
+ bl do_page_fault
+ cmpdi r3,0
+ beq+ 12f
+- bl save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 4509603..cdb491f 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -460,6 +460,8 @@ void migrate_irqs(void)
+ }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ static inline void check_stack_overflow(void)
+ {
+ #ifdef CONFIG_DEBUG_STACKOVERFLOW
+@@ -472,6 +474,7 @@ static inline void check_stack_overflow(void)
+ pr_err("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
++ gr_handle_kernel_exploit();
+ }
+ #endif
+ }
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index c94d2e0..992a9ce 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -158,7 +158,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- pr_err("Module doesn't contain .plt or .init.plt sections.\n");
++ pr_err("Module $s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -188,11 +188,16 @@ static uint32_t do_plt_call(void *location,
+
+ pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+@@ -296,7 +301,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ }
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ module->arch.tramp =
+- do_plt_call(module->module_core,
++ do_plt_call(module->module_core_rx,
+ (unsigned long)ftrace_caller,
+ sechdrs, module);
+ #endif
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 64e6e9d..cf90ed5 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1033,8 +1033,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1550,10 +1550,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1573,7 +1573,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
++ printk("--- interrupt: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1609,49 +1609,3 @@ void notrace __ppc64_runlatch_off(void)
+ mtspr(SPRN_CTRLT, ctrl);
+ }
+ #endif /* CONFIG_PPC64 */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index f21897b..28c0428 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -1762,6 +1762,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+@@ -1774,6 +1778,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+
+ secure_computing_strict(regs->gpr[0]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+@@ -1805,6 +1814,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+ int step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index da50e0c..5ff6307 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -1009,7 +1009,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ sigret = 0;
+ tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+ } else {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index c7c24d2..1bf7039 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -754,7 +754,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs
+ current->thread.fp_state.fpscr = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 37de90f..12472ac 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -36,6 +36,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/ratelimit.h>
+ #include <linux/context_tracking.h>
++#include <linux/uaccess.h>
+
+ #include <asm/emulated_ops.h>
+ #include <asm/pgtable.h>
+@@ -142,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
+ return flags;
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+ int signr)
+ {
+@@ -191,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
++
++ gr_handle_kernel_exploit();
++
+ do_exit(signr);
+ }
+
+@@ -1139,6 +1145,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
+ enum ctx_state prev_state = exception_enter();
+ unsigned int reason = get_reason(regs);
+
++#ifdef CONFIG_PAX_REFCOUNT
++ unsigned int bkpt;
++ const struct exception_table_entry *entry;
++
++ if (reason & REASON_ILLEGAL) {
++ /* Check if PaX bad instruction */
++ if (!probe_kernel_address(regs->nip, bkpt) && bkpt == 0xc00b00) {
++ current->thread.trap_nr = 0;
++ pax_report_refcount_overflow(regs);
++ /* fixup_exception() for PowerPC does not exist, simulate its job */
++ if ((entry = search_exception_tables(regs->nip)) != NULL) {
++ regs->nip = entry->fixup;
++ return;
++ }
++ /* fixup_exception() could not handle */
++ goto bail;
++ }
++ }
++#endif
++
+ /* We can now get here via a FP Unavailable exception if the core
+ * has no FPU, in that case the reason flags will be 0 */
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index b457bfa..9018cde 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -34,6 +34,7 @@
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
+ #include <asm/setup.h>
++#include <asm/mman.h>
+
+ #undef DEBUG
+
+@@ -179,7 +180,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -199,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index e5dde32..557af3d 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1404,7 +1404,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids_param)
+ }
+ EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
+
+-int kvm_arch_init(void *opaque)
++int kvm_arch_init(const void *opaque)
+ {
+ return 0;
+ }
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index a67c6d7..a662e6d 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -34,6 +34,10 @@
+ #include <linux/context_tracking.h>
+ #include <linux/hugetlb.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -227,7 +258,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -384,12 +415,16 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+
+ if (is_exec) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (error_code & DSISR_GUARDED)
++ goto bad_area;
++#endif
+ /*
+ * Allow execution from readable areas if the MMU does not
+ * provide separate controls over reading and executing.
+@@ -484,6 +519,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ goto bail;
+ }
+diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
+index 0f0502e..bc3e7a3 100644
+--- a/arch/powerpc/mm/mmap.c
++++ b/arch/powerpc/mm/mmap.c
+@@ -86,6 +86,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -95,9 +99,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index 0f432a7..abfe841 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, addr, len, 0);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+ info.align_offset = 0;
+
+ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr += mm->delta_mmap;
++#endif
++
+ while (addr < TASK_SIZE) {
+ info.low_limit = addr;
+ if (!slice_scan_available(addr, available, 1, &addr))
+@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
+index d966bbe..372124a 100644
+--- a/arch/powerpc/platforms/cell/spufs/file.c
++++ b/arch/powerpc/platforms/cell/spufs/file.c
+@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ return VM_FAULT_NOPAGE;
+ }
+
+-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
++static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
+ unsigned long address,
+- void *buf, int len, int write)
++ void *buf, size_t len, int write)
+ {
+ struct spu_context *ctx = vma->vm_file->private_data;
+ unsigned long offset = address - vma->vm_start;
+diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
+index c56878e..073d04e 100644
+--- a/arch/s390/Kconfig.debug
++++ b/arch/s390/Kconfig.debug
+@@ -21,6 +21,7 @@ config S390_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index adbe380..adb7516 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -317,4 +317,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* __ARCH_S390_ATOMIC__ */
+diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
+index e6f8615..4a66339 100644
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -42,7 +42,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 4d7ccac..d03d0ad 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -9,8 +9,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define NET_SKB_PAD 32
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 3ad48f2..64cc6f3 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
+index c4a93d6..4d2a9b4 100644
+--- a/arch/s390/include/asm/exec.h
++++ b/arch/s390/include/asm/exec.h
+@@ -7,6 +7,6 @@
+ #ifndef __ASM_EXEC_H
+ #define __ASM_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* __ASM_EXEC_H */
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 9dd4cc4..36f4b84 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -59,6 +59,7 @@ static inline int __range_ok(unsigned long addr, unsigned long size)
+ __range_ok((unsigned long)(addr), (size)); \
+ })
+
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) __access_ok(addr, size)
+
+ /*
+@@ -278,6 +279,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ return __copy_to_user(to, from, n);
+ }
+
+@@ -307,10 +312,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- unsigned int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+ might_fault();
+- if (unlikely(sz != -1 && sz < n)) {
++
++ if ((long)n < 0)
++ return n;
++
++ if (unlikely(sz != (size_t)-1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+ }
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 0c1a679..e1df357 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ rc = apply_rela_bits(loc, val, 0, 64, 0);
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT) {
+- val += (Elf_Addr) me->module_core - loc;
++ val += (Elf_Addr) me->module_core_rw - loc;
+ rc = apply_rela_bits(loc, val, 1, 32, 1);
+ }
+ break;
+@@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+ ip[1] = 0x100a0004;
+@@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ rc = apply_rela_bits(loc, val, 0, 16, 0);
+ else if (r_type == R_390_GOTOFF32)
+@@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ rc = apply_rela_bits(loc, val, 1, 32, 0);
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 8f587d8..0642516b 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -200,27 +200,3 @@ unsigned long get_wchan(struct task_struct *p)
+ }
+ return 0;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+- else
+- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long ret;
+-
+- ret = PAGE_ALIGN(mm->brk + brk_rnd());
+- return (ret > mm->brk) ? ret : mm->brk;
+-}
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index 6e552af..3e608a1 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -239,6 +239,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE)
+ random_factor = arch_mmap_rnd();
+
+@@ -248,9 +252,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = mmap_base_legacy(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ } else {
+ mm->mmap_base = mmap_base(random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
+index f9f3cd5..58ff438 100644
+--- a/arch/score/include/asm/exec.h
++++ b/arch/score/include/asm/exec.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_SCORE_EXEC_H
+ #define _ASM_SCORE_EXEC_H
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #endif /* _ASM_SCORE_EXEC_H */
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index a1519ad3..e8ac1ff 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index ef9e555..331bd29 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index 6777177..cb5e44f 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_colour_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_colour_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_colour_align)
+@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index 2e48eb8..c90930d 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key,
+ .encrypt = ctr_crypt,
+ .decrypt = ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 6bf2479..561a84d 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .max_keysize = CAMELLIA_MAX_KEY_SIZE,
++ .ivsize = CAMELLIA_BLOCK_SIZE,
+ .setkey = camellia_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index dd6a34f..61af794 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_set_key,
+ .encrypt = cbc_encrypt,
+ .decrypt = cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ .blkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = des3_ede_set_key,
+ .encrypt = cbc3_encrypt,
+ .decrypt = cbc3_decrypt,
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index 4082749..fd97781 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -15,18 +15,38 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) ACCESS_ONCE((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
+ #define atomic64_read(v) ACCESS_ONCE((v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return ACCESS_ONCE(v->counter);
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+-#define ATOMIC_OP(op) \
+-void atomic_##op(int, atomic_t *); \
+-void atomic64_##op(long, atomic64_t *);
++#define __ATOMIC_OP(op, suffix) \
++void atomic_##op##suffix(int, atomic##suffix##_t *); \
++void atomic64_##op##suffix(long, atomic64##suffix##_t *);
+
+-#define ATOMIC_OP_RETURN(op) \
+-int atomic_##op##_return(int, atomic_t *); \
+-long atomic64_##op##_return(long, atomic64_t *);
++#define ATOMIC_OP(op) __ATOMIC_OP(op, ) __ATOMIC_OP(op, _unchecked)
++
++#define __ATOMIC_OP_RETURN(op, suffix) \
++int atomic_##op##_return##suffix(int, atomic##suffix##_t *); \
++long atomic64_##op##_return##suffix(long, atomic64##suffix##_t *);
++
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, ) __ATOMIC_OP_RETURN(op, _unchecked)
+
+ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+@@ -35,13 +55,23 @@ ATOMIC_OPS(sub)
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+ #define atomic_dec_return(v) atomic_sub_return(1, v)
+ #define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+ #define atomic_inc_return(v) atomic_add_return(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_return(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -52,6 +82,10 @@ ATOMIC_OPS(sub)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+@@ -61,25 +95,60 @@ ATOMIC_OPS(sub)
+ #define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -90,20 +159,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
+index 809941e..b443309 100644
+--- a/arch/sparc/include/asm/barrier_64.h
++++ b/arch/sparc/include/asm/barrier_64.h
+@@ -60,7 +60,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 5bb6991..5c2132e 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,10 +7,12 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
+ #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SPARC32
+ #define SMP_CACHE_BYTES_SHIFT 5
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index a24e41f..47677ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -114,6 +114,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 370ca1e..d4f4a98 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -189,6 +189,13 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
++
+ extern unsigned long sparc64_elf_hwcap;
+ #define ELF_HWCAP sparc64_elf_hwcap
+
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index a3890da..f6a408e 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -35,6 +35,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+ }
+
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
+ unsigned long address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 5e31871..13469c6 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -21,6 +21,7 @@ static inline void __pgd_populate(pgd_t *pgd, pud_t *pud)
+ }
+
+ #define pgd_populate(MM, PGD, PUD) __pgd_populate(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+@@ -38,6 +39,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
+ }
+
+ #define pud_populate(MM, PUD, PMD) __pud_populate(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable.h b/arch/sparc/include/asm/pgtable.h
+index 59ba6f6..4518128 100644
+--- a/arch/sparc/include/asm/pgtable.h
++++ b/arch/sparc/include/asm/pgtable.h
+@@ -5,4 +5,8 @@
+ #else
+ #include <asm/pgtable_32.h>
+ #endif
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #endif
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index f06b36a..bca3189 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -51,6 +51,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
+ #define PAGE_SHARED SRMMU_PAGE_SHARED
+ #define PAGE_COPY SRMMU_PAGE_COPY
+ #define PAGE_READONLY SRMMU_PAGE_RDONLY
++#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
++#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
++#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
+ #define PAGE_KERNEL SRMMU_PAGE_KERNEL
+
+ /* Top-level page directory - dummy used by init-mm.
+@@ -63,18 +66,18 @@ extern unsigned long ptr_in_current_pgd;
+
+ /* xwr */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index ae51a11..eadfd03 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -111,6 +111,11 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index 29d64b1..4272fe8 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -55,8 +55,8 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+ void handle_ld_nf(u32 insn, struct pt_regs *regs);
+
+ /* init_64.c */
+-extern atomic_t dcpage_flushes;
+-extern atomic_t dcpage_flushes_xcall;
++extern atomic_unchecked_t dcpage_flushes;
++extern atomic_unchecked_t dcpage_flushes_xcall;
+
+ extern int sysctl_tsb_ratio;
+ #endif
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 9689176..63c18ea 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(arch_rwlock_t *lock)
++static inline void arch_read_lock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(arch_rwlock_t *lock)
++static inline int arch_read_trylock(arch_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(arch_rwlock_t *lock)
++static inline void arch_read_unlock(arch_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_lock(arch_rwlock_t *lock)
++static inline void arch_write_lock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(arch_rwlock_t *lock)
++static inline void arch_write_unlock(arch_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(arch_rwlock_t *lock)
++static inline int arch_write_trylock(arch_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index 229475f..2fca9163 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -48,6 +48,7 @@ struct thread_info {
+ struct reg_window32 reg_window[NSWINS]; /* align for ldd! */
+ unsigned long rwbuf_stkptrs[NSWINS];
+ unsigned long w_saved;
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index bde5982..9cbb56d 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -59,6 +59,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[(7 * 256) / sizeof(unsigned long)]
+ __attribute__ ((aligned(64)));
+ };
+@@ -180,12 +182,13 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+ /* flag bit 4 is available */
+ #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
+-/* flag bit 6 is available */
++#define TIF_GRSEC_SETXID 6 /* update credentials on syscall entry/exit */
+ #define TIF_32BIT 7 /* 32-bit binary */
+ #define TIF_NOHZ 8 /* in adaptive nohz mode */
+ #define TIF_SECCOMP 9 /* secure computing */
+ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+ * in using in assembly, else we can't use the mask as
+ * an immediate value in instructions such as andcc.
+@@ -205,12 +208,17 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+ _TIF_DO_NOTIFY_RESUME_MASK | \
+ _TIF_NEED_RESCHED)
+ #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+
++#define _TIF_WORK_SYSCALL \
++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
++
+ #define is_32bit_task() (test_thread_flag(TIF_32BIT))
+
+ /*
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index bd56c28..4b63d83 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 64ee103..388aef0 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -47,6 +47,7 @@
+ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
++#define access_ok_noprefault(type, addr, size) access_ok((type), (addr), (size))
+ #define access_ok(type, addr, size) \
+ ({ (void)(type); __access_ok((unsigned long)(addr), size); })
+
+@@ -313,27 +314,46 @@ unsigned long __copy_user(void __user *to, const void __user *from, unsigned lon
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index ea6e9a2..5703598 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/spitfire.h>
+ #include <asm-generic/uaccess-unaligned.h>
+@@ -76,6 +77,11 @@ static inline int __access_ok(const void __user * addr, unsigned long size)
+ return 1;
+ }
+
++static inline int access_ok_noprefault(int type, const void __user * addr, unsigned long size)
++{
++ return 1;
++}
++
+ static inline int access_ok(int type, const void __user * addr, unsigned long size)
+ {
+ return 1;
+@@ -250,8 +256,15 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+
+@@ -267,8 +280,15 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
++
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index 7cf9c6e..6206648 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -4,7 +4,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index 50e7b62..79fae35 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -123,14 +123,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -167,7 +167,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index 46a5964..a35c62c 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -161,7 +161,7 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+@@ -170,7 +170,7 @@ void show_regs(struct pt_regs *regs)
+
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -183,7 +183,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
+ }
+@@ -278,7 +278,7 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
+index 79cc0d1..ec62734 100644
+--- a/arch/sparc/kernel/prom_common.c
++++ b/arch/sparc/kernel/prom_common.c
+@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
+
+ unsigned int prom_early_allocated __initdata;
+
+-static struct of_pdt_ops prom_sparc_ops __initdata = {
++static struct of_pdt_ops prom_sparc_ops __initconst = {
+ .nextprop = prom_common_nextprop,
+ .getproplen = prom_getproplen,
+ .getproperty = prom_getproperty,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 9ddc492..27a5619 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1060,6 +1060,10 @@ long arch_ptrace(struct task_struct *child, long request,
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+ int ret = 0;
+@@ -1070,6 +1074,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+@@ -1088,6 +1097,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ if (test_thread_flag(TIF_NOHZ))
+ user_exit();
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 19cd08d..ff21e99 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -891,7 +891,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ return;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ this_cpu = get_cpu();
+@@ -915,7 +915,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpumask_of(cpu));
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ }
+@@ -934,7 +934,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ preempt_disable();
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+ data0 = 0;
+ pg_addr = page_address(page);
+@@ -951,7 +951,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
+ xcall_deliver(data0, __pa(pg_addr),
+ (u64) pg_addr, cpu_online_mask);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes_xcall);
++ atomic_inc_unchecked(&dcpage_flushes_xcall);
+ #endif
+ }
+ __local_flush_dcache_page(page);
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 646988d..b88905f 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -54,7 +54,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (len > TASK_SIZE - PAGE_SIZE)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ info.flags = 0;
+ info.length = len;
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb..266a3b0 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -89,13 +89,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ struct vm_area_struct * vma;
+ unsigned long task_size = TASK_SIZE;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -110,6 +111,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOR_ALIGN(addr, pgoff);
+@@ -117,22 +122,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -150,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ struct vm_unmapped_area_info info;
+
+ /* This should only ever run for 32-bit processes. */
+@@ -159,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -172,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+@@ -180,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+
+@@ -191,6 +206,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -203,6 +219,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -259,10 +281,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
+ EXPORT_SYMBOL(get_fb_unmapped_area);
+
+ /* Essentially the same as PowerPC. */
+-static unsigned long mmap_rnd(void)
++static unsigned long mmap_rnd(struct mm_struct *mm)
+ {
+ unsigned long rnd = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ unsigned long val = get_random_int();
+ if (test_thread_flag(TIF_32BIT))
+@@ -275,7 +301,7 @@ static unsigned long mmap_rnd(void)
+
+ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+- unsigned long random_factor = mmap_rnd();
++ unsigned long random_factor = mmap_rnd(mm);
+ unsigned long gap;
+
+ /*
+@@ -288,6 +314,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ } else {
+ /* We know it's 32-bit */
+@@ -299,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ }
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index bb00089..e0ea580 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+ .align 32
+ 1: ldx [%g6 + TI_FLAGS], %l5
+- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l5, _TIF_WORK_SYSCALL, %g0
+ be,pt %icc, rtrap
+ nop
+ call syscall_trace_leave
+@@ -194,7 +194,7 @@ linux_sparc_syscall32:
+
+ srl %i3, 0, %o3 ! IEU0
+ srl %i2, 0, %o2 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ 5: call %l7 ! CTI Group brk forced
+@@ -218,7 +218,7 @@ linux_sparc_syscall:
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+ 2: call %l7 ! CTI Group brk forced
+@@ -233,7 +233,7 @@ ret_sys_call:
+
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+
+ 2:
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index 4f21df7..0a374da 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -76,15 +78,17 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index d21cd62..00a4a17 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -79,7 +79,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -99,6 +99,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -117,11 +123,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1151,7 +1162,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1758,7 +1769,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1766,7 +1777,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1839,8 +1850,8 @@ struct sun4v_error_entry {
+ /*0x38*/u64 reserved_5;
+ };
+
+-static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
+-static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
++static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
+
+ static const char *sun4v_err_type_to_str(u8 type)
+ {
+@@ -1932,7 +1943,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
+ }
+
+ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+- int cpu, const char *pfx, atomic_t *ocnt)
++ int cpu, const char *pfx, atomic_unchecked_t *ocnt)
+ {
+ u64 *raw_ptr = (u64 *) ent;
+ u32 attrs;
+@@ -1990,8 +2001,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
+
+ show_regs(regs);
+
+- if ((cnt = atomic_read(ocnt)) != 0) {
+- atomic_set(ocnt, 0);
++ if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
++ atomic_set_unchecked(ocnt, 0);
+ wmb();
+ printk("%s: Queue overflowed %d times.\n",
+ pfx, cnt);
+@@ -2048,7 +2059,7 @@ out:
+ */
+ void sun4v_resum_overflow(struct pt_regs *regs)
+ {
+- atomic_inc(&sun4v_resum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
+ }
+
+ /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
+@@ -2101,7 +2112,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
+ /* XXX Actually even this can make not that much sense. Perhaps
+ * XXX we should just pull the plug and panic directly from here?
+ */
+- atomic_inc(&sun4v_nonresum_oflow_cnt);
++ atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
+ }
+
+ static void sun4v_tlb_error(struct pt_regs *regs)
+@@ -2120,9 +2131,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2143,9 +2154,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2362,13 +2373,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((pc + 8UL) == (unsigned long) &return_to_handler) {
+ int index = tsk->curr_ret_stack;
+ if (tsk->ret_stack && index >= graph) {
+ pc = tsk->ret_stack[index - graph].ret;
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ graph++;
+ }
+ }
+@@ -2386,6 +2397,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2414,7 +2427,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30 &&
+ kstack_valid(tp, (unsigned long) rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2429,8 +2442,10 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ if (panic_on_oops)
+ panic("Fatal exception");
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 62098a8..547ab2c 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -297,7 +297,7 @@ static void log_unaligned(struct pt_regs *regs)
+ static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
+
+ if (__ratelimit(&ratelimit)) {
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index 3269b02..64f5231 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 05dac43..76f8ed4 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -15,11 +15,22 @@
+ * a value and does the barriers.
+ */
+
+-#define ATOMIC_OP(op) \
+-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#ifdef CONFIG_PAX_REFCOUNT
++#define __REFCOUNT_OP(op) op##cc
++#define __OVERFLOW_IOP tvs %icc, 6;
++#define __OVERFLOW_XOP tvs %xcc, 6;
++#else
++#define __REFCOUNT_OP(op) op
++#define __OVERFLOW_IOP
++#define __OVERFLOW_XOP
++#endif
++
++#define __ATOMIC_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+@@ -29,11 +40,15 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ ENDPROC(atomic_##op); \
+
+-#define ATOMIC_OP_RETURN(op) \
+-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC_OP(op) __ATOMIC_OP(op, , op, ) \
++ __ATOMIC_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
++
++#define __ATOMIC_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: lduw [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ cas [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %icc, BACKOFF_LABEL(2f, 1b); \
+@@ -43,6 +58,9 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ ENDPROC(atomic_##op##_return);
+
++#define ATOMIC_OP_RETURN(op) __ATOMIC_OP_RETURN(op, , op, ) \
++ __ATOMIC_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_IOP)
++
+ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ ATOMIC_OPS(add)
+@@ -50,13 +68,16 @@ ATOMIC_OPS(sub)
+
+ #undef ATOMIC_OPS
+ #undef ATOMIC_OP_RETURN
++#undef __ATOMIC_OP_RETURN
+ #undef ATOMIC_OP
++#undef __ATOMIC_OP
+
+-#define ATOMIC64_OP(op) \
+-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define __ATOMIC64_OP(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##suffix) /* %o0 = increment, %o1 = atomic_ptr */ \
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+@@ -66,11 +87,15 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ ENDPROC(atomic64_##op); \
+
+-#define ATOMIC64_OP_RETURN(op) \
+-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
++#define ATOMIC64_OP(op) __ATOMIC64_OP(op, , op, ) \
++ __ATOMIC64_OP(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
++
++#define __ATOMIC64_OP_RETURN(op, suffix, asm_op, post_op) \
++ENTRY(atomic64_##op##_return##suffix) /* %o0 = increment, %o1 = atomic_ptr */\
+ BACKOFF_SETUP(%o2); \
+ 1: ldx [%o1], %g1; \
+- op %g1, %o0, %g7; \
++ asm_op %g1, %o0, %g7; \
++ post_op \
+ casx [%o1], %g1, %g7; \
+ cmp %g1, %g7; \
+ bne,pn %xcc, BACKOFF_LABEL(2f, 1b); \
+@@ -80,6 +105,9 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
+ ENDPROC(atomic64_##op##_return);
+
++#define ATOMIC64_OP_RETURN(op) __ATOMIC64_OP_RETURN(op, , op, ) \
++	 __ATOMIC64_OP_RETURN(op, _unchecked, __REFCOUNT_OP(op), __OVERFLOW_XOP)
++
+ #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ ATOMIC64_OPS(add)
+@@ -87,7 +115,12 @@ ATOMIC64_OPS(sub)
+
+ #undef ATOMIC64_OPS
+ #undef ATOMIC64_OP_RETURN
++#undef __ATOMIC64_OP_RETURN
+ #undef ATOMIC64_OP
++#undef __ATOMIC64_OP
++#undef __OVERFLOW_XOP
++#undef __OVERFLOW_IOP
++#undef __REFCOUNT_OP
+
+ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 8069ce1..c2e23c4 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -101,7 +101,9 @@ EXPORT_SYMBOL(__clear_user);
+ /* Atomic counter implementation. */
+ #define ATOMIC_OP(op) \
+ EXPORT_SYMBOL(atomic_##op); \
+-EXPORT_SYMBOL(atomic64_##op);
++EXPORT_SYMBOL(atomic_##op##_unchecked); \
++EXPORT_SYMBOL(atomic64_##op); \
++EXPORT_SYMBOL(atomic64_##op##_unchecked);
+
+ #define ATOMIC_OP_RETURN(op) \
+ EXPORT_SYMBOL(atomic_##op##_return); \
+@@ -110,6 +112,8 @@ EXPORT_SYMBOL(atomic64_##op##_return);
+ #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ ATOMIC_OPS(add)
++EXPORT_SYMBOL(atomic_add_return_unchecked);
++EXPORT_SYMBOL(atomic64_add_return_unchecked);
+ ATOMIC_OPS(sub)
+
+ #undef ATOMIC_OPS
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 30c3ecc..736f015 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index c399e7b..2387414 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -22,6 +22,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/kdebug.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -156,6 +159,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned int addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+ int text_fault)
+ {
+@@ -226,6 +500,24 @@ good_area:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index dbabe57..d34d315 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -23,6 +23,9 @@
+ #include <linux/percpu.h>
+ #include <linux/context_tracking.h>
+ #include <linux/uaccess.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -76,7 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -279,6 +282,466 @@ static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ INIT_LIST_HEAD(&vma->anon_vma_chain);
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned long addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ enum ctx_state prev_state = exception_enter();
+@@ -353,6 +816,29 @@ retry:
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index 131eaf4..285ea31 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -25,8 +25,10 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+- unsigned long flags)
++ unsigned long flags,
++ unsigned long offset)
+ {
++ struct mm_struct *mm = current->mm;
+ unsigned long task_size = TASK_SIZE;
+ struct vm_unmapped_area_info info;
+
+@@ -35,15 +37,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+
+ info.flags = 0;
+ info.length = len;
+- info.low_limit = TASK_UNMAPPED_BASE;
++ info.low_limit = mm->mmap_base;
+ info.high_limit = min(task_size, VA_EXCLUDE_START);
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.low_limit = VA_EXCLUDE_END;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = task_size;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -55,7 +64,8 @@ static unsigned long
+ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+- const unsigned long flags)
++ const unsigned long flags,
++ const unsigned long offset)
+ {
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+@@ -70,6 +80,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.high_limit = mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~HPAGE_MASK;
+ info.align_offset = 0;
++ info.threadstack_offset = offset;
+ addr = vm_unmapped_area(&info);
+
+ /*
+@@ -82,6 +93,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ info.low_limit += mm->delta_mmap;
++#endif
++
+ info.high_limit = STACK_TOP32;
+ addr = vm_unmapped_area(&info);
+ }
+@@ -96,6 +113,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+@@ -111,19 +129,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ return addr;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ }
+
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 4ac88b7..bac6cb2 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -187,9 +187,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
+ int num_kernel_image_mappings;
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+-atomic_t dcpage_flushes = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
+ #ifdef CONFIG_SMP
+-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
++atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+ #endif
+ #endif
+
+@@ -197,7 +197,7 @@ inline void flush_dcache_page_impl(struct page *page)
+ {
+ BUG_ON(tlb_type == hypervisor);
+ #ifdef CONFIG_DEBUG_DCFLUSH
+- atomic_inc(&dcpage_flushes);
++ atomic_inc_unchecked(&dcpage_flushes);
+ #endif
+
+ #ifdef DCACHE_ALIASING_POSSIBLE
+@@ -469,10 +469,10 @@ void mmu_info(struct seq_file *m)
+
+ #ifdef CONFIG_DEBUG_DCFLUSH
+ seq_printf(m, "DCPageFlushes\t: %d\n",
+- atomic_read(&dcpage_flushes));
++ atomic_read_unchecked(&dcpage_flushes));
+ #ifdef CONFIG_SMP
+ seq_printf(m, "DCPageFlushesXC\t: %d\n",
+- atomic_read(&dcpage_flushes_xcall));
++ atomic_read_unchecked(&dcpage_flushes_xcall));
+ #endif /* CONFIG_SMP */
+ #endif /* CONFIG_DEBUG_DCFLUSH */
+ }
+diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
+index 9def1f5..cf0cabc 100644
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -204,6 +204,7 @@ source "kernel/Kconfig.hz"
+
+ config KEXEC
+ bool "kexec system call"
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
+index 0496970..1a57e5f 100644
+--- a/arch/tile/include/asm/atomic_64.h
++++ b/arch/tile/include/asm/atomic_64.h
+@@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_TILE_ATOMIC_64_H */
+diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
+index 6160761..00cac88 100644
+--- a/arch/tile/include/asm/cache.h
++++ b/arch/tile/include/asm/cache.h
+@@ -15,11 +15,12 @@
+ #ifndef _ASM_TILE_CACHE_H
+ #define _ASM_TILE_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/chip.h>
+
+ /* bytes per L1 data cache line */
+ #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /* bytes per L2 cache line */
+ #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
+diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
+index 0a9c4265..bfb62d1 100644
+--- a/arch/tile/include/asm/uaccess.h
++++ b/arch/tile/include/asm/uaccess.h
+@@ -429,9 +429,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
+ const void __user *from,
+ unsigned long n)
+ {
+- int sz = __compiletime_object_size(to);
++ size_t sz = __compiletime_object_size(to);
+
+- if (likely(sz == -1 || sz >= n))
++ if (likely(sz == (size_t)-1 || sz >= n))
+ n = _copy_from_user(to, from, n);
+ else
+ copy_from_user_overflow();
+diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
+index c034dc3..cf1cc96 100644
+--- a/arch/tile/mm/hugetlbpage.c
++++ b/arch/tile/mm/hugetlbpage.c
+@@ -174,6 +174,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+
+@@ -191,6 +192,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ info.high_limit = current->mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
++ info.threadstack_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 098ab33..fc54a33 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -73,6 +73,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
+ -D_FILE_OFFSET_BITS=64 -idirafter include \
+ -D__KERNEL__ -D__UM_HOST__
+
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
++
+ #This will adjust *FLAGS accordingly to the platform.
+ include $(ARCH_DIR)/Makefile-os-$(OS)
+
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 2e0a6b1..a64d0f5 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -8,6 +8,6 @@
+
+ /* No more #include "asm/arch/kmap_types.h" ! */
+
+-#define KM_TYPE_NR 14
++#define KM_TYPE_NR 15
+
+ #endif
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index 71c5d13..4c7b9f1 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index 2b4274e..754fe06 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 68b9119..f72353c 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -345,22 +345,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/exec.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
+index ad8f795..2c7eec6 100644
+--- a/arch/unicore32/include/asm/cache.h
++++ b/arch/unicore32/include/asm/cache.h
+@@ -12,8 +12,10 @@
+ #ifndef __UNICORE_CACHE_H__
+ #define __UNICORE_CACHE_H__
+
+-#define L1_CACHE_SHIFT (5)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#include <linux/const.h>
++
++#define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index b3a1a5d..8dbc2d6 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -35,13 +35,12 @@ config X86
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+- select ARCH_SUPPORTS_INT128 if X86_64
++ select ARCH_SUPPORTS_INT128 if X86_64 && !PAX_SIZE_OVERFLOW
+ select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if X86_64
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+@@ -85,7 +84,7 @@ config X86
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select HAVE_BPF_JIT if X86_64
+- select HAVE_CC_STACKPROTECTOR
++ select HAVE_CC_STACKPROTECTOR if X86_64 || !PAX_MEMORY_UDEREF
+ select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
+ select HAVE_CONTEXT_TRACKING if X86_64
+@@ -274,7 +273,7 @@ config X86_64_SMP
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config ARCH_HWEIGHT_CFLAGS
+ string
+@@ -646,6 +645,7 @@ config SCHED_OMIT_FRAME_POINTER
+
+ menuconfig HYPERVISOR_GUEST
+ bool "Linux guest support"
++ depends on !GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_VIRT_GUEST || (GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_XEN)
+ ---help---
+ Say Y here to enable options for running Linux under various hyper-
+ visors. This option enables basic hypervisor detection and platform
+@@ -1014,6 +1014,7 @@ config VM86
+
+ config X86_16BIT
+ bool "Enable support for 16-bit segments" if EXPERT
++ depends on !GRKERNSEC
+ default y
+ ---help---
+ This option is required by programs like Wine to run 16-bit
+@@ -1182,6 +1183,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1218,6 +1220,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
++ depends on !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1270,7 +1273,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1290,7 +1293,6 @@ config X86_PAE
+
+ config ARCH_PHYS_ADDR_T_64BIT
+ def_bool y
+- depends on X86_64 || X86_PAE
+
+ config ARCH_DMA_ADDR_T_64BIT
+ def_bool y
+@@ -1724,6 +1726,7 @@ source kernel/Kconfig.hz
+
+ config KEXEC
+ bool "kexec system call"
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -1906,7 +1909,9 @@ config X86_NEED_RELOCS
+
+ config PHYSICAL_ALIGN
+ hex "Alignment value to which kernel should be aligned"
+- default "0x200000"
++ default "0x1000000"
++ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
++ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
+ range 0x2000 0x1000000 if X86_32
+ range 0x200000 0x1000000 if X86_64
+ ---help---
+@@ -1989,6 +1994,7 @@ config COMPAT_VDSO
+ def_bool n
+ prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Certain buggy versions of glibc will crash if they are
+ presented with a 32-bit vDSO that is not mapped at the address
+@@ -2053,6 +2059,22 @@ config CMDLINE_OVERRIDE
+ This is used to work around broken boot loaders. This should
+ be set to 'N' under normal conditions.
+
++config DEFAULT_MODIFY_LDT_SYSCALL
++ bool "Allow userspace to modify the LDT by default"
++ default y
++
++ ---help---
++ Modifying the LDT (Local Descriptor Table) may be needed to run a
++ 16-bit or segmented code such as Dosemu or Wine. This is done via
++ a system call which is not needed to run portable applications,
++ and which can sometimes be abused to exploit some weaknesses of
++ the architecture, opening new vulnerabilities.
++
++ For this reason this option allows one to enable or disable the
++ feature at runtime. It is recommended to say 'N' here to leave
++ the system protected, and to enable it at runtime only if needed
++ by setting the sys.kernel.modify_ldt sysctl.
++
+ source "kernel/livepatch/Kconfig"
+
+ endmenu
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 6983314..54ad7e8 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486
++ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
+
+ config X86_INVD_BUG
+ def_bool y
+@@ -327,7 +327,7 @@ config X86_INVD_BUG
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -369,7 +369,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index d8c0d32..28e3117 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -69,6 +69,7 @@ config X86_PTDUMP
+ bool "Export kernel pagetable layout to userspace via debugfs"
+ depends on DEBUG_KERNEL
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ Say Y here if you want to show the kernel pagetable layout in a
+ debugfs file. This information is only useful for kernel developers
+@@ -89,7 +90,7 @@ config EFI_PGT_DUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+@@ -107,7 +108,7 @@ config DEBUG_RODATA_TEST
+
+ config DEBUG_SET_MODULE_RONX
+ bool "Set loadable kernel module data as NX and text as RO"
+- depends on MODULES
++ depends on MODULES && BROKEN
+ ---help---
+ This option helps catch unintended modifications to loadable
+ kernel module's text and read-only data. It also prevents execution
+@@ -359,6 +360,7 @@ config X86_DEBUG_FPU
+ config PUNIT_ATOM_DEBUG
+ tristate "ATOM Punit debug driver"
+ select DEBUG_FS
++ depends on !GRKERNSEC_KMEM
+ select IOSF_MBI
+ ---help---
+ This is a debug driver, which gets the power states
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 118e6de..e02efff 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -65,9 +65,6 @@ ifeq ($(CONFIG_X86_32),y)
+ # CPU-specific tuning. Anything which can be shared with UML should go here.
+ include arch/x86/Makefile_32.cpu
+ KBUILD_CFLAGS += $(cflags-y)
+-
+- # temporary until string.h is fixed
+- KBUILD_CFLAGS += -ffreestanding
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
+@@ -116,6 +113,9 @@ else
+ KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args)
+ endif
+
++# temporary until string.h is fixed
++KBUILD_CFLAGS += -ffreestanding
++
+ # Make sure compiler does not have buggy stack-protector support.
+ ifdef CONFIG_CC_STACKPROTECTOR
+ cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+@@ -184,6 +184,7 @@ archheaders:
+ $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
+
+ archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+ ifeq ($(CONFIG_KEXEC_FILE),y)
+ $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
+ endif
+@@ -267,3 +268,9 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 57bbf2f..b100fce 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -58,6 +58,9 @@ clean-files += cpustr.h
+ # ---------------------------------------------------------------------------
+
+ KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 878e4b9..20537ab 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index bd49ec6..94c7f58 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -84,7 +84,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index 0a291cd..9686efc 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -30,6 +30,9 @@ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
+index a53440e..c3dbf1e 100644
+--- a/arch/x86/boot/compressed/efi_stub_32.S
++++ b/arch/x86/boot/compressed/efi_stub_32.S
+@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %ecx
+- movl %ecx, saved_return_addr(%edx)
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr(%edx)
++ popl saved_return_addr(%edx)
++ popl efi_rt_function_ptr(%edx)
+
+ /*
+ * 3. Call the physical function.
+ */
+- call *%ecx
++ call *efi_rt_function_ptr(%edx)
+
+ /*
+ * 4. Balance the stack. And because EAX contain the return value,
+@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
+ 1: popl %edx
+ subl $1b, %edx
+
+- movl efi_rt_function_ptr(%edx), %ecx
+- pushl %ecx
++ pushl efi_rt_function_ptr(%edx)
+
+ /*
+ * 10. Push the saved return address onto the stack and return.
+ */
+- movl saved_return_addr(%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *saved_return_addr(%edx)
+ ENDPROC(efi_call_phys)
+ .previous
+
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+index 630384a..278e788 100644
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -189,8 +189,8 @@ efi_gdt64:
+ .long 0 /* Filled out by user */
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ efi_gdt64_end:
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index 8ef964d..fcfb8aa 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -141,10 +141,10 @@ preferred_addr:
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index b0c0d16..3b44ff8 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -95,10 +95,10 @@ ENTRY(startup_32)
+ addl %eax, %ebx
+ notl %eax
+ andl %eax, %ebx
+- cmpl $LOAD_PHYSICAL_ADDR, %ebx
++ cmpl $____LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
+ #endif
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -323,10 +323,10 @@ preferred_addr:
+ addq %rax, %rbp
+ notq %rax
+ andq %rax, %rbp
+- cmpq $LOAD_PHYSICAL_ADDR, %rbp
++ cmpq $____LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
+ #endif
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ 1:
+
+ /* Target address to relocate to for decompression */
+@@ -435,8 +435,8 @@ gdt:
+ .long gdt
+ .word 0
+ .quad 0x0000000000000000 /* NULL descriptor */
+- .quad 0x00af9a000000ffff /* __KERNEL_CS */
+- .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00af9b000000ffff /* __KERNEL_CS */
++ .quad 0x00cf93000000ffff /* __KERNEL_DS */
+ .quad 0x0080890000000000 /* TS descriptor */
+ .quad 0x0000000000000000 /* TS continued */
+ gdt_end:
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index e28437e..6a17460 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -242,7 +242,7 @@ static void handle_relocations(void *output, unsigned long output_len)
+ * Calculate the delta between where vmlinux was linked to load
+ * and where it was actually loaded.
+ */
+- delta = min_addr - LOAD_PHYSICAL_ADDR;
++ delta = min_addr - ____LOAD_PHYSICAL_ADDR;
+ if (!delta) {
+ debug_putstr("No relocation needed... ");
+ return;
+@@ -324,7 +324,7 @@ static void parse_elf(void *output)
+ Elf32_Ehdr ehdr;
+ Elf32_Phdr *phdrs, *phdr;
+ #endif
+- void *dest;
++ void *dest, *prev;
+ int i;
+
+ memcpy(&ehdr, output, sizeof(ehdr));
+@@ -351,13 +351,16 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+ memcpy(dest,
+ output + phdr->p_offset,
+ phdr->p_filesz);
++ if (i)
++ memset(prev, 0xff, dest - prev);
++ prev = dest + phdr->p_filesz;
+ break;
+ default: /* Ignore other PT_* */ break;
+ }
+@@ -419,7 +422,7 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 1fd7d57..0f7d096 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -125,9 +125,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
+@@ -140,9 +140,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_cpuflags();
+@@ -153,12 +153,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_cpuflags();
+ } else if (err == 0x01 &&
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index 16ef025..91e033b 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -438,10 +438,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE (VO__end - VO__text)
++#endif
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ #define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index db75d07..8e6d0af 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index ba3e100..6501b8f 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -201,6 +201,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index 05111bb..a1ae1f0 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -98,7 +98,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 9105655..41779c1 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -70,6 +72,8 @@
+ je B192; \
+ leaq 32(r9),r9;
+
++#define ret pax_force_retaddr; ret
++
+ #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+ movq r1,r2; \
+ movq r3,r4; \
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index 6bd2c6c..368c93e 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -31,6 +31,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ /*
+ * The following macros are used to move an (un)aligned 16 byte value to/from
+@@ -217,7 +218,7 @@ enc: .octa 0x2
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -227,8 +228,8 @@ enc: .octa 0x2
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+
+ _get_AAD_loop\num_initial_blocks\operation:
+@@ -237,17 +238,17 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+@@ -442,7 +443,7 @@ _initial_blocks_done\num_initial_blocks\operation:
+ * num_initial_blocks = b mod 4
+ * encrypt the initial num_initial_blocks blocks and apply ghash on
+ * the ciphertext
+-* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
++* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+ * are clobbered
+ * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+ */
+@@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
+ XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+ MOVADQ SHUF_MASK(%rip), %xmm14
+ mov arg7, %r10 # %r10 = AAD
+- mov arg8, %r12 # %r12 = aadLen
+- mov %r12, %r11
++ mov arg8, %r15 # %r15 = aadLen
++ mov %r15, %r11
+ pxor %xmm\i, %xmm\i
+ _get_AAD_loop\num_initial_blocks\operation:
+ movd (%r10), \TMP1
+@@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+ pxor \TMP1, %xmm\i
+ add $4, %r10
+- sub $4, %r12
++ sub $4, %r15
+ jne _get_AAD_loop\num_initial_blocks\operation
+ cmp $16, %r11
+ je _get_AAD_loop2_done\num_initial_blocks\operation
+- mov $16, %r12
++ mov $16, %r15
+ _get_AAD_loop2\num_initial_blocks\operation:
+ psrldq $4, %xmm\i
+- sub $4, %r12
+- cmp %r11, %r12
++ sub $4, %r15
++ cmp %r11, %r15
+ jne _get_AAD_loop2\num_initial_blocks\operation
+ _get_AAD_loop2_done\num_initial_blocks\operation:
+ PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
+@@ -1280,7 +1281,7 @@ _esb_loop_\@:
+ *
+ *****************************************************************************/
+ ENTRY(aesni_gcm_dec)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1290,8 +1291,8 @@ ENTRY(aesni_gcm_dec)
+ */
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13 # %xmm13 = HashKey
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13 # %xmm13 = HashKey
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1319,10 +1320,10 @@ ENTRY(aesni_gcm_dec)
+ movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
+ mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
+- mov %r13, %r12
+- and $(3<<4), %r12
++ mov %r13, %r15
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_decrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_decrypt
+ je _initial_num_blocks_is_2_decrypt
+ _initial_num_blocks_is_3_decrypt:
+@@ -1372,16 +1373,16 @@ _zero_cipher_left_decrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
+
+ movdqa %xmm1, %xmm2
+ pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
+ pand %xmm1, %xmm2
+@@ -1410,9 +1411,9 @@ _less_than_8_bytes_left_decrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_decrypt
+ _multiple_of_16_bytes_decrypt:
+- mov arg8, %r12 # %r13 = aadLen (number of bytes)
+- shl $3, %r12 # convert into number of bits
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r13 = aadLen (number of bytes)
++ shl $3, %r15 # convert into number of bits
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1451,7 +1452,8 @@ _return_T_done_decrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
++ pop %r15
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_gcm_dec)
+
+@@ -1540,7 +1542,7 @@ ENDPROC(aesni_gcm_dec)
+ * poly = x^128 + x^127 + x^126 + x^121 + 1
+ ***************************************************************************/
+ ENTRY(aesni_gcm_enc)
+- push %r12
++ push %r15
+ push %r13
+ push %r14
+ mov %rsp, %r14
+@@ -1550,8 +1552,8 @@ ENTRY(aesni_gcm_enc)
+ #
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp
+- mov %arg6, %r12
+- movdqu (%r12), %xmm13
++ mov %arg6, %r15
++ movdqu (%r15), %xmm13
+ movdqa SHUF_MASK(%rip), %xmm2
+ PSHUFB_XMM %xmm2, %xmm13
+
+@@ -1575,13 +1577,13 @@ ENTRY(aesni_gcm_enc)
+ movdqa %xmm13, HashKey(%rsp)
+ mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
+ and $-16, %r13
+- mov %r13, %r12
++ mov %r13, %r15
+
+ # Encrypt first few blocks
+
+- and $(3<<4), %r12
++ and $(3<<4), %r15
+ jz _initial_num_blocks_is_0_encrypt
+- cmp $(2<<4), %r12
++ cmp $(2<<4), %r15
+ jb _initial_num_blocks_is_1_encrypt
+ je _initial_num_blocks_is_2_encrypt
+ _initial_num_blocks_is_3_encrypt:
+@@ -1634,14 +1636,14 @@ _zero_cipher_left_encrypt:
+ sub $16, %r11
+ add %r13, %r11
+ movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
+- lea SHIFT_MASK+16(%rip), %r12
+- sub %r13, %r12
++ lea SHIFT_MASK+16(%rip), %r15
++ sub %r13, %r15
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (%r13 is the number of bytes in plaintext mod 16)
+- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
++ movdqu (%r15), %xmm2 # get the appropriate shuffle mask
+ PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
+ pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
+- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
++ movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
+ # get the appropriate mask to mask out top 16-r13 bytes of xmm0
+ pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
+ movdqa SHUF_MASK(%rip), %xmm10
+@@ -1674,9 +1676,9 @@ _less_than_8_bytes_left_encrypt:
+ sub $1, %r13
+ jne _less_than_8_bytes_left_encrypt
+ _multiple_of_16_bytes_encrypt:
+- mov arg8, %r12 # %r12 = addLen (number of bytes)
+- shl $3, %r12
+- movd %r12d, %xmm15 # len(A) in %xmm15
++ mov arg8, %r15 # %r15 = addLen (number of bytes)
++ shl $3, %r15
++ movd %r15d, %xmm15 # len(A) in %xmm15
+ shl $3, %arg4 # len(C) in bits (*128)
+ MOVQ_R64_XMM %arg4, %xmm1
+ pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
+@@ -1715,7 +1717,8 @@ _return_T_done_encrypt:
+ mov %r14, %rsp
+ pop %r14
+ pop %r13
+- pop %r12
++ pop %r15
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_gcm_enc)
+
+@@ -1733,6 +1736,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+ ENDPROC(_key_expansion_128)
+ ENDPROC(_key_expansion_256a)
+@@ -1759,6 +1763,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 0x10(TKEYP)
+ add $0x20, TKEYP
++ pax_force_retaddr
+ ret
+ ENDPROC(_key_expansion_192a)
+
+@@ -1779,6 +1784,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+ ENDPROC(_key_expansion_192b)
+
+@@ -1792,6 +1798,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (TKEYP)
+ add $0x10, TKEYP
++ pax_force_retaddr
+ ret
+ ENDPROC(_key_expansion_256b)
+
+@@ -1905,6 +1912,7 @@ ENTRY(aesni_set_key)
+ #ifndef __x86_64__
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_set_key)
+
+@@ -1927,6 +1935,7 @@ ENTRY(aesni_enc)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_enc)
+
+@@ -1985,6 +1994,7 @@ _aesni_enc1:
+ AESENC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESENCLAST KEY STATE
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_enc1)
+
+@@ -2094,6 +2104,7 @@ _aesni_enc4:
+ AESENCLAST KEY STATE2
+ AESENCLAST KEY STATE3
+ AESENCLAST KEY STATE4
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_enc4)
+
+@@ -2117,6 +2128,7 @@ ENTRY(aesni_dec)
+ popl KLEN
+ popl KEYP
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_dec)
+
+@@ -2175,6 +2187,7 @@ _aesni_dec1:
+ AESDEC KEY STATE
+ movaps 0x70(TKEYP), KEY
+ AESDECLAST KEY STATE
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_dec1)
+
+@@ -2284,6 +2297,7 @@ _aesni_dec4:
+ AESDECLAST KEY STATE2
+ AESDECLAST KEY STATE3
+ AESDECLAST KEY STATE4
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_dec4)
+
+@@ -2342,6 +2356,7 @@ ENTRY(aesni_ecb_enc)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_ecb_enc)
+
+@@ -2401,6 +2416,7 @@ ENTRY(aesni_ecb_dec)
+ popl KEYP
+ popl LEN
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_ecb_dec)
+
+@@ -2443,6 +2459,7 @@ ENTRY(aesni_cbc_enc)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_cbc_enc)
+
+@@ -2534,6 +2551,7 @@ ENTRY(aesni_cbc_dec)
+ popl LEN
+ popl IVP
+ #endif
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_cbc_dec)
+
+@@ -2561,6 +2579,7 @@ _aesni_inc_init:
+ mov $1, TCTR_LOW
+ MOVQ_R64_XMM TCTR_LOW INC
+ MOVQ_R64_XMM CTR TCTR_LOW
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_inc_init)
+
+@@ -2590,6 +2609,7 @@ _aesni_inc:
+ .Linc_low:
+ movaps CTR, IV
+ PSHUFB_XMM BSWAP_MASK IV
++ pax_force_retaddr
+ ret
+ ENDPROC(_aesni_inc)
+
+@@ -2651,6 +2671,7 @@ ENTRY(aesni_ctr_enc)
+ .Lctr_enc_ret:
+ movups IV, (IVP)
+ .Lctr_enc_just_ret:
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_ctr_enc)
+
+@@ -2777,6 +2798,7 @@ ENTRY(aesni_xts_crypt8)
+ pxor INC, STATE4
+ movdqu STATE4, 0x70(OUTP)
+
++ pax_force_retaddr
+ ret
+ ENDPROC(aesni_xts_crypt8)
+
+diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+index 246c670..466e2d6 100644
+--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
++++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "blowfish-x86_64-asm.S"
+ .text
+@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
+ jnz .L__enc_xor;
+
+ write_block();
++ pax_force_retaddr
+ ret;
+ .L__enc_xor:
+ xor_block();
++ pax_force_retaddr
+ ret;
+ ENDPROC(__blowfish_enc_blk)
+
+@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
+
+ movq %r11, %rbp;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(blowfish_dec_blk)
+
+@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr
+ ret;
+
+ .L__enc_xor4:
+@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
+
+ popq %rbx;
+ popq %rbp;
++ pax_force_retaddr
+ ret;
+ ENDPROC(__blowfish_enc_blk_4way)
+
+@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
+ popq %rbx;
+ popq %rbp;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(blowfish_dec_blk_4way)
+diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+index ce71f92..1dce7ec 100644
+--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+@@ -16,6 +16,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
+ %rcx, (%r9));
++ pax_force_retaddr
+ ret;
+ ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+
+@@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
+ %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
+ %rax, (%r9));
++ pax_force_retaddr
+ ret;
+ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+
+@@ -780,6 +783,7 @@ __camellia_enc_blk16:
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
+
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -865,6 +869,7 @@ __camellia_dec_blk16:
+ %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+ %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
+
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ecb_enc_16way)
+
+@@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ecb_dec_16way)
+
+@@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_cbc_dec_16way)
+
+@@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ctr_16way)
+
+@@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
+ %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
+ %xmm8, %rsi);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_xts_crypt_16way)
+
+diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+index 0e0b886..5a3123c 100644
+--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
++++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define CAMELLIA_TABLE_BYTE_LEN 272
+
+@@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+ roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
+ %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
+ %rcx, (%r9));
++ pax_force_retaddr
+ ret;
+ ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+
+@@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+ roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
+ %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
+ %rax, (%r9));
++ pax_force_retaddr
+ ret;
+ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+
+@@ -820,6 +823,7 @@ __camellia_enc_blk32:
+ %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
+ %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
+
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -905,6 +909,7 @@ __camellia_dec_blk32:
+ %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
+ %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
+
++ pax_force_retaddr
+ ret;
+
+ .align 8
+@@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ecb_enc_32way)
+
+@@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ecb_dec_32way)
+
+@@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_cbc_dec_32way)
+
+@@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_ctr_32way)
+
+@@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_xts_crypt_32way)
+
+diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
+index 310319c..db3d7b5 100644
+--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
++++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "camellia-x86_64-asm_64.S"
+ .text
+@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
+ enc_outunpack(mov, RT1);
+
+ movq RRBP, %rbp;
++ pax_force_retaddr
+ ret;
+
+ .L__enc_xor:
+ enc_outunpack(xor, RT1);
+
+ movq RRBP, %rbp;
++ pax_force_retaddr
+ ret;
+ ENDPROC(__camellia_enc_blk)
+
+@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
+ dec_outunpack();
+
+ movq RRBP, %rbp;
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_dec_blk)
+
+@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
+
+ movq RRBP, %rbp;
+ popq %rbx;
++ pax_force_retaddr
+ ret;
+
+ .L__enc2_xor:
+@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
+
+ movq RRBP, %rbp;
+ popq %rbx;
++ pax_force_retaddr
+ ret;
+ ENDPROC(__camellia_enc_blk_2way)
+
+@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
+
+ movq RRBP, %rbp;
+ movq RXOR, %rbx;
++ pax_force_retaddr
+ ret;
+ ENDPROC(camellia_dec_blk_2way)
+diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+index c35fd5d..2d8c7db 100644
+--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "cast5-avx-x86_64-asm_64.S"
+
+@@ -281,6 +282,7 @@ __cast5_enc_blk16:
+ outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+ outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__cast5_enc_blk16)
+
+@@ -352,6 +354,7 @@ __cast5_dec_blk16:
+ outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
+ outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
+
++ pax_force_retaddr
+ ret;
+
+ .L__skip_dec:
+@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
+ vmovdqu RR4, (6*4*4)(%r11);
+ vmovdqu RL4, (7*4*4)(%r11);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast5_ecb_enc_16way)
+
+@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
+ vmovdqu RR4, (6*4*4)(%r11);
+ vmovdqu RL4, (7*4*4)(%r11);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast5_ecb_dec_16way)
+
+@@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
+ * %rdx: src
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ vmovdqu (0*16)(%rdx), RL1;
+ vmovdqu (1*16)(%rdx), RR1;
+@@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
+ call __cast5_dec_blk16;
+
+ /* xor with src */
+- vmovq (%r12), RX;
++ vmovq (%r14), RX;
+ vpshufd $0x4f, RX, RX;
+ vpxor RX, RR1, RR1;
+- vpxor 0*16+8(%r12), RL1, RL1;
+- vpxor 1*16+8(%r12), RR2, RR2;
+- vpxor 2*16+8(%r12), RL2, RL2;
+- vpxor 3*16+8(%r12), RR3, RR3;
+- vpxor 4*16+8(%r12), RL3, RL3;
+- vpxor 5*16+8(%r12), RR4, RR4;
+- vpxor 6*16+8(%r12), RL4, RL4;
++ vpxor 0*16+8(%r14), RL1, RL1;
++ vpxor 1*16+8(%r14), RR2, RR2;
++ vpxor 2*16+8(%r14), RL2, RL2;
++ vpxor 3*16+8(%r14), RR3, RR3;
++ vpxor 4*16+8(%r14), RL3, RL3;
++ vpxor 5*16+8(%r14), RR4, RR4;
++ vpxor 6*16+8(%r14), RL4, RL4;
+
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+@@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast5_cbc_dec_16way)
+
+@@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
+ * %rcx: iv (big endian, 64bit)
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ vpcmpeqd RTMP, RTMP, RTMP;
+ vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
+@@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
+ call __cast5_enc_blk16;
+
+ /* dst = src ^ iv */
+- vpxor (0*16)(%r12), RR1, RR1;
+- vpxor (1*16)(%r12), RL1, RL1;
+- vpxor (2*16)(%r12), RR2, RR2;
+- vpxor (3*16)(%r12), RL2, RL2;
+- vpxor (4*16)(%r12), RR3, RR3;
+- vpxor (5*16)(%r12), RL3, RL3;
+- vpxor (6*16)(%r12), RR4, RR4;
+- vpxor (7*16)(%r12), RL4, RL4;
++ vpxor (0*16)(%r14), RR1, RR1;
++ vpxor (1*16)(%r14), RL1, RL1;
++ vpxor (2*16)(%r14), RR2, RR2;
++ vpxor (3*16)(%r14), RL2, RL2;
++ vpxor (4*16)(%r14), RR3, RR3;
++ vpxor (5*16)(%r14), RL3, RL3;
++ vpxor (6*16)(%r14), RR4, RR4;
++ vpxor (7*16)(%r14), RL4, RL4;
+ vmovdqu RR1, (0*16)(%r11);
+ vmovdqu RL1, (1*16)(%r11);
+ vmovdqu RR2, (2*16)(%r11);
+@@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
+ vmovdqu RR4, (6*16)(%r11);
+ vmovdqu RL4, (7*16)(%r11);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast5_ctr_16way)
+diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+index e3531f8..e123f35 100644
+--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+
+ .file "cast6-avx-x86_64-asm_64.S"
+@@ -295,6 +296,7 @@ __cast6_enc_blk8:
+ outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__cast6_enc_blk8)
+
+@@ -340,6 +342,7 @@ __cast6_dec_blk8:
+ outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__cast6_dec_blk8)
+
+@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_ecb_enc_8way)
+
+@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_ecb_dec_8way)
+
+@@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
+ * %rdx: src
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+ call __cast6_dec_blk8;
+
+- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_cbc_dec_8way)
+
+@@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
+ * %rcx: iv (little endian, 128bit)
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX, RKR, RKM);
+
+ call __cast6_enc_blk8;
+
+- store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_ctr_8way)
+
+@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_xts_enc_8way)
+
+@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(cast6_xts_dec_8way)
+diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+index 225be06..2885e731 100644
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -45,6 +45,7 @@
+
+ #include <asm/inst.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
+
+@@ -309,6 +310,7 @@ do_return:
+ popq %rsi
+ popq %rdi
+ popq %rbx
++ pax_force_retaddr
+ ret
+
+ ################################################################
+@@ -330,7 +332,7 @@ ENDPROC(crc_pcl)
+ ## PCLMULQDQ tables
+ ## Table is 128 entries x 2 words (8 bytes) each
+ ################################################################
+-.section .rotata, "a", %progbits
++.section .rodata, "a", %progbits
+ .align 8
+ K_table:
+ .long 0x493c7d27, 0x00000001
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+index 5d1e007..098cb4f 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
++++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
+@@ -18,6 +18,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/inst.h>
++#include <asm/alternative-asm.h>
+
+ .data
+
+@@ -89,6 +90,7 @@ __clmul_gf128mul_ble:
+ psrlq $1, T2
+ pxor T2, T1
+ pxor T1, DATA
++ pax_force_retaddr
+ ret
+ ENDPROC(__clmul_gf128mul_ble)
+
+@@ -101,6 +103,7 @@ ENTRY(clmul_ghash_mul)
+ call __clmul_gf128mul_ble
+ PSHUFB_XMM BSWAP DATA
+ movups DATA, (%rdi)
++ pax_force_retaddr
+ ret
+ ENDPROC(clmul_ghash_mul)
+
+@@ -128,5 +131,6 @@ ENTRY(clmul_ghash_update)
+ PSHUFB_XMM BSWAP DATA
+ movups DATA, (%rdi)
+ .Lupdate_just_ret:
++ pax_force_retaddr
+ ret
+ ENDPROC(clmul_ghash_update)
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 9279e0b..c4b3d2c 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,4 +1,5 @@
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ # enter salsa20_encrypt_bytes
+ ENTRY(salsa20_encrypt_bytes)
+@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # bytesatleast65:
+ ._bytesatleast65:
+@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ ENDPROC(salsa20_keysetup)
+
+@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ ENDPROC(salsa20_ivsetup)
+diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+index 2f202f4..d9164d6 100644
+--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+
+ .file "serpent-avx-x86_64-asm_64.S"
+@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
+ write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__serpent_enc_blk8_avx)
+
+@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
+ write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+ write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__serpent_dec_blk8_avx)
+
+@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
+
+ store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ecb_enc_8way_avx)
+
+@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
+
+ store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ecb_dec_8way_avx)
+
+@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
+
+ store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_cbc_dec_8way_avx)
+
+@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
+
+ store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ctr_8way_avx)
+
+@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_xts_enc_8way_avx)
+
+@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_xts_dec_8way_avx)
+diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
+index b222085..abd483c 100644
+--- a/arch/x86/crypto/serpent-avx2-asm_64.S
++++ b/arch/x86/crypto/serpent-avx2-asm_64.S
+@@ -15,6 +15,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx2.S"
+
+ .file "serpent-avx2-asm_64.S"
+@@ -610,6 +611,7 @@ __serpent_enc_blk16:
+ write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__serpent_enc_blk16)
+
+@@ -664,6 +666,7 @@ __serpent_dec_blk16:
+ write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+ write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__serpent_dec_blk16)
+
+@@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ecb_enc_16way)
+
+@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ecb_dec_16way)
+
+@@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_cbc_dec_16way)
+
+@@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_ctr_16way)
+
+@@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_xts_enc_16way)
+
+@@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
+
+ vzeroupper;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_xts_dec_16way)
+diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+index acc066c..1559cc4 100644
+--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
++++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+@@ -25,6 +25,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "serpent-sse2-x86_64-asm_64.S"
+ .text
+@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
+ write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+
+ .L__enc_xor8:
+ xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
+ xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__serpent_enc_blk_8way)
+
+@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
+ write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
+ write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(serpent_dec_blk_8way)
+diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
+index a410950..9dfe7ad 100644
+--- a/arch/x86/crypto/sha1_ssse3_asm.S
++++ b/arch/x86/crypto/sha1_ssse3_asm.S
+@@ -29,6 +29,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define CTX %rdi // arg1
+ #define BUF %rsi // arg2
+@@ -75,9 +76,9 @@
+
+ push %rbx
+ push %rbp
+- push %r12
++ push %r14
+
+- mov %rsp, %r12
++ mov %rsp, %r14
+ sub $64, %rsp # allocate workspace
+ and $~15, %rsp # align stack
+
+@@ -99,11 +100,12 @@
+ xor %rax, %rax
+ rep stosq
+
+- mov %r12, %rsp # deallocate workspace
++ mov %r14, %rsp # deallocate workspace
+
+- pop %r12
++ pop %r14
+ pop %rbp
+ pop %rbx
++ pax_force_retaddr
+ ret
+
+ ENDPROC(\name)
+diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
+index 92b3b5d..0dc1dcb 100644
+--- a/arch/x86/crypto/sha256-avx-asm.S
++++ b/arch/x86/crypto/sha256-avx-asm.S
+@@ -49,6 +49,7 @@
+
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ ## assume buffers not aligned
+ #define VMOVDQ vmovdqu
+@@ -460,6 +461,7 @@ done_hash:
+ popq %r13
+ popq %rbp
+ popq %rbx
++ pax_force_retaddr
+ ret
+ ENDPROC(sha256_transform_avx)
+
+diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
+index 570ec5e..cf2b625 100644
+--- a/arch/x86/crypto/sha256-avx2-asm.S
++++ b/arch/x86/crypto/sha256-avx2-asm.S
+@@ -50,6 +50,7 @@
+
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ ## assume buffers not aligned
+ #define VMOVDQ vmovdqu
+@@ -720,6 +721,7 @@ done_hash:
+ popq %r12
+ popq %rbp
+ popq %rbx
++ pax_force_retaddr
+ ret
+ ENDPROC(sha256_transform_rorx)
+
+diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
+index 2cedc44..5144899 100644
+--- a/arch/x86/crypto/sha256-ssse3-asm.S
++++ b/arch/x86/crypto/sha256-ssse3-asm.S
+@@ -47,6 +47,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ ## assume buffers not aligned
+ #define MOVDQ movdqu
+@@ -471,6 +472,7 @@ done_hash:
+ popq %rbp
+ popq %rbx
+
++ pax_force_retaddr
+ ret
+ ENDPROC(sha256_transform_ssse3)
+
+diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
+index 565274d..af6bc08 100644
+--- a/arch/x86/crypto/sha512-avx-asm.S
++++ b/arch/x86/crypto/sha512-avx-asm.S
+@@ -49,6 +49,7 @@
+
+ #ifdef CONFIG_AS_AVX
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -364,6 +365,7 @@ updateblock:
+ mov frame_RSPSAVE(%rsp), %rsp
+
+ nowork:
++ pax_force_retaddr
+ ret
+ ENDPROC(sha512_transform_avx)
+
+diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
+index 1f20b35..f25c8c1 100644
+--- a/arch/x86/crypto/sha512-avx2-asm.S
++++ b/arch/x86/crypto/sha512-avx2-asm.S
+@@ -51,6 +51,7 @@
+
+ #ifdef CONFIG_AS_AVX2
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -678,6 +679,7 @@ done_hash:
+
+ # Restore Stack Pointer
+ mov frame_RSPSAVE(%rsp), %rsp
++ pax_force_retaddr
+ ret
+ ENDPROC(sha512_transform_rorx)
+
+diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
+index e610e29..ffcb5ed 100644
+--- a/arch/x86/crypto/sha512-ssse3-asm.S
++++ b/arch/x86/crypto/sha512-ssse3-asm.S
+@@ -48,6 +48,7 @@
+ ########################################################################
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -363,6 +364,7 @@ updateblock:
+ mov frame_RSPSAVE(%rsp), %rsp
+
+ nowork:
++ pax_force_retaddr
+ ret
+ ENDPROC(sha512_transform_ssse3)
+
+diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+index 0505813..b067311 100644
+--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+@@ -24,6 +24,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+ #include "glue_helper-asm-avx.S"
+
+ .file "twofish-avx-x86_64-asm_64.S"
+@@ -284,6 +285,7 @@ __twofish_enc_blk8:
+ outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__twofish_enc_blk8)
+
+@@ -324,6 +326,7 @@ __twofish_dec_blk8:
+ outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
+ outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(__twofish_dec_blk8)
+
+@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
+
+ store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_ecb_enc_8way)
+
+@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
+
+ store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_ecb_dec_8way)
+
+@@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
+ * %rdx: src
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+ call __twofish_dec_blk8;
+
+- store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
++ store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_cbc_dec_8way)
+
+@@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
+ * %rcx: iv (little endian, 128bit)
+ */
+
+- pushq %r12;
++ pushq %r14;
+
+ movq %rsi, %r11;
+- movq %rdx, %r12;
++ movq %rdx, %r14;
+
+ load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
+ RD2, RX0, RX1, RY0);
+
+ call __twofish_enc_blk8;
+
+- store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
++ store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
+- popq %r12;
++ popq %r14;
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_ctr_8way)
+
+@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_xts_enc_8way)
+
+@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
+ /* dst <= regs xor IVs(in dst) */
+ store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
+
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_xts_dec_8way)
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 1c3b7ce..02f578d 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -21,6 +21,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .file "twofish-x86_64-asm-3way.S"
+ .text
+@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+
+ .L__enc_xor3:
+@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+ ENDPROC(__twofish_enc_blk_3way)
+
+@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
+ popq %r13;
+ popq %r14;
+ popq %r15;
++ pax_force_retaddr
+ ret;
+ ENDPROC(twofish_dec_blk_3way)
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index a350c99..c1bac24 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -22,6 +22,7 @@
+
+ #include <linux/linkage.h>
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define a_offset 0
+ #define b_offset 4
+@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
+
+ popq R1
+ movl $1,%eax
++ pax_force_retaddr
+ ret
+ ENDPROC(twofish_enc_blk)
+
+@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
+
+ popq R1
+ movl $1,%eax
++ pax_force_retaddr
+ ret
+ ENDPROC(twofish_dec_blk)
+diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
+index f4e6308..7ba29a1 100644
+--- a/arch/x86/entry/calling.h
++++ b/arch/x86/entry/calling.h
+@@ -93,23 +93,26 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
+
+ .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12+\offset(%rsp)
++#endif
+ .if \r11
+- movq %r11, 6*8+\offset(%rsp)
++ movq %r11, R11+\offset(%rsp)
+ .endif
+ .if \r8910
+- movq %r10, 7*8+\offset(%rsp)
+- movq %r9, 8*8+\offset(%rsp)
+- movq %r8, 9*8+\offset(%rsp)
++ movq %r10, R10+\offset(%rsp)
++ movq %r9, R9+\offset(%rsp)
++ movq %r8, R8+\offset(%rsp)
+ .endif
+ .if \rax
+- movq %rax, 10*8+\offset(%rsp)
++ movq %rax, RAX+\offset(%rsp)
+ .endif
+ .if \rcx
+- movq %rcx, 11*8+\offset(%rsp)
++ movq %rcx, RCX+\offset(%rsp)
+ .endif
+- movq %rdx, 12*8+\offset(%rsp)
+- movq %rsi, 13*8+\offset(%rsp)
+- movq %rdi, 14*8+\offset(%rsp)
++ movq %rdx, RDX+\offset(%rsp)
++ movq %rsi, RSI+\offset(%rsp)
++ movq %rdi, RDI+\offset(%rsp)
+ .endm
+ .macro SAVE_C_REGS offset=0
+ SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+@@ -128,76 +131,87 @@ For 32-bit we have the following conventions - kernel is built with
+ .endm
+
+ .macro SAVE_EXTRA_REGS offset=0
+- movq %r15, 0*8+\offset(%rsp)
+- movq %r14, 1*8+\offset(%rsp)
+- movq %r13, 2*8+\offset(%rsp)
+- movq %r12, 3*8+\offset(%rsp)
+- movq %rbp, 4*8+\offset(%rsp)
+- movq %rbx, 5*8+\offset(%rsp)
++ movq %r15, R15+\offset(%rsp)
++ movq %r14, R14+\offset(%rsp)
++ movq %r13, R13+\offset(%rsp)
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12+\offset(%rsp)
++#endif
++ movq %rbp, RBP+\offset(%rsp)
++ movq %rbx, RBX+\offset(%rsp)
+ .endm
+ .macro SAVE_EXTRA_REGS_RBP offset=0
+- movq %rbp, 4*8+\offset(%rsp)
++ movq %rbp, RBP+\offset(%rsp)
+ .endm
+
+ .macro RESTORE_EXTRA_REGS offset=0
+- movq 0*8+\offset(%rsp), %r15
+- movq 1*8+\offset(%rsp), %r14
+- movq 2*8+\offset(%rsp), %r13
+- movq 3*8+\offset(%rsp), %r12
+- movq 4*8+\offset(%rsp), %rbp
+- movq 5*8+\offset(%rsp), %rbx
++ movq R15+\offset(%rsp), %r15
++ movq R14+\offset(%rsp), %r14
++ movq R13+\offset(%rsp), %r13
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq R12+\offset(%rsp), %r12
++#endif
++ movq RBP+\offset(%rsp), %rbp
++ movq RBX+\offset(%rsp), %rbx
+ .endm
+
+ .macro ZERO_EXTRA_REGS
+ xorl %r15d, %r15d
+ xorl %r14d, %r14d
+ xorl %r13d, %r13d
++#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
+ xorl %r12d, %r12d
++#endif
+ xorl %ebp, %ebp
+ xorl %ebx, %ebx
+ .endm
+
+- .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
++ .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1, rstor_r12=1
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .if \rstor_r12
++ movq R12(%rsp), %r12
++ .endif
++#endif
+ .if \rstor_r11
+- movq 6*8(%rsp), %r11
++ movq R11(%rsp), %r11
+ .endif
+ .if \rstor_r8910
+- movq 7*8(%rsp), %r10
+- movq 8*8(%rsp), %r9
+- movq 9*8(%rsp), %r8
++ movq R10(%rsp), %r10
++ movq R9(%rsp), %r9
++ movq R8(%rsp), %r8
+ .endif
+ .if \rstor_rax
+- movq 10*8(%rsp), %rax
++ movq RAX(%rsp), %rax
+ .endif
+ .if \rstor_rcx
+- movq 11*8(%rsp), %rcx
++ movq RCX(%rsp), %rcx
+ .endif
+ .if \rstor_rdx
+- movq 12*8(%rsp), %rdx
++ movq RDX(%rsp), %rdx
+ .endif
+- movq 13*8(%rsp), %rsi
+- movq 14*8(%rsp), %rdi
++ movq RSI(%rsp), %rsi
++ movq RDI(%rsp), %rdi
+ .endm
+ .macro RESTORE_C_REGS
+- RESTORE_C_REGS_HELPER 1,1,1,1,1
++ RESTORE_C_REGS_HELPER 1,1,1,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RAX
+- RESTORE_C_REGS_HELPER 0,1,1,1,1
++ RESTORE_C_REGS_HELPER 0,1,1,1,1,0
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX
+- RESTORE_C_REGS_HELPER 1,0,1,1,1
++ RESTORE_C_REGS_HELPER 1,0,1,1,1,0
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_R11
+- RESTORE_C_REGS_HELPER 1,1,0,1,1
++ RESTORE_C_REGS_HELPER 1,1,0,1,1,1
+ .endm
+ .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+- RESTORE_C_REGS_HELPER 1,0,0,1,1
++ RESTORE_C_REGS_HELPER 1,0,0,1,1,1
+ .endm
+ .macro RESTORE_RSI_RDI
+- RESTORE_C_REGS_HELPER 0,0,0,0,0
++ RESTORE_C_REGS_HELPER 0,0,0,0,0,1
+ .endm
+ .macro RESTORE_RSI_RDI_RDX
+- RESTORE_C_REGS_HELPER 0,0,0,0,1
++ RESTORE_C_REGS_HELPER 0,0,0,0,1,1
+ .endm
+
+ .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
+index 21dc60a..844def1 100644
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -157,13 +157,154 @@
+ movl \reg, PT_GS(%esp)
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $X86_CR0_WP_BIT, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $X86_CR0_WP_BIT, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %ecx
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ or $2*4, %edi
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %ecx
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -176,7 +317,7 @@
+ pushl %edx
+ pushl %ecx
+ pushl %ebx
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -184,6 +325,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ popl %ecx
+@@ -222,7 +372,7 @@ ENTRY(ret_from_fork)
+ pushl $0x0202 # Reset kernel eflags
+ popfl
+ jmp syscall_exit
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ ENTRY(ret_from_kernel_thread)
+ pushl %eax
+@@ -262,7 +412,15 @@ ret_from_intr:
+ andl $SEGMENT_RPL_MASK, %eax
+ #endif
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ pax_exit_kernel
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -274,8 +432,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -287,7 +445,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+
+ /*
+@@ -312,32 +470,44 @@ sysenter_past_esp:
+ pushl $__USER_CS
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary: TI_sysenter_return
+- * is relative to thread_info, which is at the bottom of the
+- * kernel stack page. 4*4 means the 4 words pushed above;
+- * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
+- * and THREAD_SIZE takes us to the bottom.
+ */
+- pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
++ pushl $0
+
+ pushl %eax
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp), %ebp
++ movl %ebp, PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp), %ds
++1: movl %ds:(%ebp), %ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3, %ebp
+ jae syscall_fault
+ ASM_STAC
+ 1: movl (%ebp), %ebp
+ ASM_CLAC
++#endif
++
+ movl %ebp, PT_EBP(%esp)
+ _ASM_EXTABLE(1b, syscall_fault)
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -353,12 +523,24 @@ sysenter_after_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jnz sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp, %ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -372,6 +554,9 @@ sysenter_audit:
+ pushl PT_ESI(%esp) /* a3: 5th arg */
+ pushl PT_EDX+4(%esp) /* a2: 4th arg */
+ call __audit_syscall_entry
++
++ pax_erase_kstack
++
+ popl %ecx /* get that remapped edx off the stack */
+ popl %ecx /* get that remapped esi off the stack */
+ movl PT_EAX(%esp), %eax /* reload syscall number */
+@@ -397,10 +582,16 @@ sysexit_audit:
+ #endif
+
+ .pushsection .fixup, "ax"
+-2: movl $0, PT_FS(%esp)
++4: movl $0, PT_FS(%esp)
++ jmp 1b
++5: movl $0, PT_DS(%esp)
++ jmp 1b
++6: movl $0, PT_ES(%esp)
+ jmp 1b
+ .popsection
+- _ASM_EXTABLE(1b, 2b)
++ _ASM_EXTABLE(1b, 4b)
++ _ASM_EXTABLE(2b, 5b)
++ _ASM_EXTABLE(3b, 6b)
+ PTGS_TO_GS_EX
+ ENDPROC(entry_SYSENTER_32)
+
+@@ -410,6 +601,11 @@ ENTRY(entry_INT80_32)
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -429,6 +625,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jnz syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -483,14 +688,34 @@ ldt_ss:
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
++#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
+ mov %esp, %edx /* load kernel esp */
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
+- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
+- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $X86_CR0_WP_BIT, %esi
++ mov %esi, %cr0
++#endif
++
++ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
++ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $X86_CR0_WP_BIT, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl $__ESPFIX_SS
+ pushl %eax /* new kernel esp */
+ /*
+@@ -519,20 +744,18 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+ jnz work_notifysig_v86 # returning to kernel-space or
+ # vm86-space
+ 1:
+-#else
+- movl %esp, %eax
+ #endif
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -553,7 +776,7 @@ work_notifysig_v86:
+ movl %eax, %esp
+ jmp 1b
+ #endif
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -561,11 +784,14 @@ syscall_trace_entry:
+ movl $-ENOSYS, PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(NR_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -578,24 +804,28 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ ASM_CLAC
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT, PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS, %eax
+ jmp syscall_after_call
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+
+ sysenter_badsys:
+ movl $-ENOSYS, %eax
+ jmp sysenter_after_call
+-END(sysenter_badsys)
++ENDPROC(sysenter_badsys)
+
+ .macro FIXUP_ESPFIX_STACK
+ /*
+@@ -607,8 +837,15 @@ END(sysenter_badsys)
+ */
+ #ifdef CONFIG_X86_ESPFIX32
+ /* fixup the stack */
+- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
++ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
++ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
+ shl $16, %eax
+ addl %esp, %eax /* the adjusted stack pointer */
+ pushl $__KERNEL_DS
+@@ -644,7 +881,7 @@ ENTRY(irq_entries_start)
+ jmp common_interrupt
+ .align 8
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ /*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+@@ -691,7 +928,7 @@ ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp error_code
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ ASM_CLAC
+@@ -705,25 +942,25 @@ ENTRY(simd_coprocessor_error)
+ pushl $do_simd_coprocessor_error
+ #endif
+ jmp error_code
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ ASM_CLAC
+ pushl $-1 # mark this as an int
+ pushl $do_device_not_available
+ jmp error_code
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+ iret
+ _ASM_EXTABLE(native_iret, iret_exc)
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -731,59 +968,59 @@ ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp error_code
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ ASM_CLAC
+ pushl $0
+ pushl $do_bounds
+ jmp error_code
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ ASM_CLAC
+ pushl $0
+ pushl $do_invalid_op
+ jmp error_code
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ ASM_CLAC
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp error_code
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ ASM_CLAC
+ pushl $do_invalid_TSS
+ jmp error_code
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ ASM_CLAC
+ pushl $do_segment_not_present
+ jmp error_code
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ ASM_CLAC
+ pushl $do_stack_segment
+ jmp error_code
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ ASM_CLAC
+ pushl $do_alignment_check
+ jmp error_code
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ ASM_CLAC
+ pushl $0 # no error code
+ pushl $do_divide_error
+ jmp error_code
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -791,7 +1028,7 @@ ENTRY(machine_check)
+ pushl $0
+ pushl machine_check_vector
+ jmp error_code
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -799,7 +1036,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl $0
+ pushl $do_spurious_interrupt_bug
+ jmp error_code
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+
+ #ifdef CONFIG_XEN
+ /*
+@@ -906,7 +1143,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ pushl %eax
+@@ -936,7 +1173,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ ENTRY(ftrace_regs_caller)
+ pushf /* push flags before compare (in cs location) */
+@@ -1034,7 +1271,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1052,7 +1289,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1100,14 +1337,17 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp, %eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1145,7 +1385,7 @@ debug_stack_correct:
+ movl %esp, %eax # pt_regs pointer
+ call do_debug
+ jmp ret_from_exception
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1184,6 +1424,9 @@ nmi_stack_correct:
+ xorl %edx, %edx # zero error code
+ movl %esp, %eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+
+ nmi_stack_fixup:
+@@ -1217,11 +1460,14 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx, %edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ jmp irq_return
+ #endif
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ ASM_CLAC
+@@ -1232,17 +1478,17 @@ ENTRY(int3)
+ movl %esp, %eax # pt_regs pointer
+ call do_int3
+ jmp ret_from_exception
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp error_code
+-END(general_protection)
++ENDPROC(general_protection)
+
+ #ifdef CONFIG_KVM_GUEST
+ ENTRY(async_page_fault)
+ ASM_CLAC
+ pushl $do_async_page_fault
+ jmp error_code
+-END(async_page_fault)
++ENDPROC(async_page_fault)
+ #endif
+diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
+index d330840..4f1925e 100644
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -37,6 +37,8 @@
+ #include <asm/smap.h>
+ #include <asm/pgtable_types.h>
+ #include <linux/err.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -54,6 +56,402 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ call pax_exit_kernel
++#endif
++ .endm
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jnz 112f
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++ jmp 111f
++112: cmp $1,%dil
++ jz 113f
++ ud2
++113: sub $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__UDEREF_KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2: ljmpq __KERNEL_CS,1b
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++#endif
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jnc 4f
++1:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ mov %ss,%edi
++ cmp $__UDEREF_KERNEL_DS,%edi
++ jnz 111f
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jz 112f
++ ud2
++112: add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++#ifdef CONFIG_PAX_KERNEXEC
++2: GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ jnc 4f
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++4: ud2
++ jmp 4b
++#endif
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $1,%dil
++ jnz 4f
++ sub $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ jmp 3f
++111:
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ pushq %rdi
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ popq %rdi
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++2: SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++3:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++4: ud2
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ ALTERNATIVE "jmp 1f", "", X86_FEATURE_PCID
++ cmp $0,%dil
++ jnz 3f
++ add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ jmp 2f
++1:
++
++ mov %rdi,%rbx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ jnc 3f
++ SET_RDI_INTO_CR0
++#endif
++
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rbx)
++ i = i + 1
++ .endr
++2:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++3: ud2
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++ .macro pax_enter_kernel_nmi
++ pax_set_fptr_mask
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $X86_CR0_WP_BIT,%rdi
++ jc 110f
++ SET_RDI_INTO_CR0
++ or $2,%ebx
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ GET_CR3_INTO_RDI
++ cmp $0,%dil
++ jz 111f
++ sub $4097,%rdi
++ or $4,%ebx
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__UDEREF_KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++ .endm
++
++ .macro pax_exit_kernel_nmi
++#ifdef CONFIG_PAX_KERNEXEC
++ btr $1,%ebx
++ jnc 110f
++ GET_CR0_INTO_RDI
++ btr $X86_CR0_WP_BIT,%rdi
++ SET_RDI_INTO_CR0
++110:
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ALTERNATIVE "jmp 111f", "", X86_FEATURE_PCID
++ btr $2,%ebx
++ jnc 111f
++ GET_CR3_INTO_RDI
++ add $4097,%rdi
++ bts $63,%rdi
++ SET_RDI_INTO_CR3
++ mov $__KERNEL_DS,%edi
++ mov %edi,%ss
++111:
++#endif
++ .endm
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rcx
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ or $2*8, %rdi
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rcx
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
+ .macro TRACE_IRQS_IRETQ
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ bt $9, EFLAGS(%rsp) /* interrupts off? */
+@@ -89,7 +487,7 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro TRACE_IRQS_IRETQ_DEBUG
+- bt $9, EFLAGS(%rsp) /* interrupts off? */
++ bt $X86_EFLAGS_IF_BIT, EFLAGS(%rsp) /* interrupts off? */
+ jnc 1f
+ TRACE_IRQS_ON_DEBUG
+ 1:
+@@ -149,14 +547,6 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+ /* Construct struct pt_regs on stack */
+ pushq $__USER_DS /* pt_regs->ss */
+ pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
+- /*
+- * Re-enable interrupts.
+- * We use 'rsp_scratch' as a scratch space, hence irq-off block above
+- * must execute atomically in the face of possible interrupt-driven
+- * task preemption. We must enable interrupts only after we're done
+- * with using rsp_scratch:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ pushq %r11 /* pt_regs->flags */
+ pushq $__USER_CS /* pt_regs->cs */
+ pushq %rcx /* pt_regs->ip */
+@@ -172,7 +562,27 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
+ pushq %r11 /* pt_regs->r11 */
+ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12(%rsp)
++#endif
++
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * Re-enable interrupts.
++ * We use 'rsp_scratch' as a scratch space, hence irq-off block above
++ * must execute atomically in the face of possible interrupt-driven
++ * task preemption. We must enable interrupts only after we're done
++ * with using rsp_scratch:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
++ GET_THREAD_INFO(%rcx)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%rcx)
+ jnz tracesys
+ entry_SYSCALL_64_fastpath:
+ #if __SYSCALL_MASK == ~0
+@@ -205,9 +615,13 @@ entry_SYSCALL_64_fastpath:
+ * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+ * very bad.
+ */
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ GET_THREAD_INFO(%rcx)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%rcx)
+ jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
+
++ pax_exit_kernel_user
++ pax_erase_kstack
++
+ RESTORE_C_REGS_EXCEPT_RCX_R11
+ movq RIP(%rsp), %rcx
+ movq EFLAGS(%rsp), %r11
+@@ -236,6 +650,9 @@ tracesys:
+ call syscall_trace_enter_phase1
+ test %rax, %rax
+ jnz tracesys_phase2 /* if needed, run the slow path */
++
++ pax_erase_kstack
++
+ RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
+ movq ORIG_RAX(%rsp), %rax
+ jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
+@@ -247,6 +664,8 @@ tracesys_phase2:
+ movq %rax, %rdx
+ call syscall_trace_enter_phase2
+
++ pax_erase_kstack
++
+ /*
+ * Reload registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_entry_phase2() returned
+@@ -284,6 +703,8 @@ GLOBAL(int_with_check)
+ andl %edi, %edx
+ jnz int_careful
+ andl $~TS_COMPAT, TI_status(%rcx)
++ pax_exit_kernel_user
++ pax_erase_kstack
+ jmp syscall_return
+
+ /*
+@@ -407,14 +828,14 @@ syscall_return_via_sysret:
+ opportunistic_sysret_failed:
+ SWAPGS
+ jmp restore_c_regs_and_iret
+-END(entry_SYSCALL_64)
++ENDPROC(entry_SYSCALL_64)
+
+
+ .macro FORK_LIKE func
+ ENTRY(stub_\func)
+ SAVE_EXTRA_REGS 8
+ jmp sys_\func
+-END(stub_\func)
++ENDPROC(stub_\func)
+ .endm
+
+ FORK_LIKE clone
+@@ -434,7 +855,7 @@ return_from_execve:
+ ZERO_EXTRA_REGS
+ movq %rax, RAX(%rsp)
+ jmp int_ret_from_sys_call
+-END(stub_execve)
++ENDPROC(stub_execve)
+ /*
+ * Remaining execve stubs are only 7 bytes long.
+ * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
+@@ -443,7 +864,7 @@ END(stub_execve)
+ GLOBAL(stub_execveat)
+ call sys_execveat
+ jmp return_from_execve
+-END(stub_execveat)
++ENDPROC(stub_execveat)
+
+ #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+ .align 8
+@@ -451,15 +872,15 @@ GLOBAL(stub_x32_execve)
+ GLOBAL(stub32_execve)
+ call compat_sys_execve
+ jmp return_from_execve
+-END(stub32_execve)
+-END(stub_x32_execve)
++ENDPROC(stub32_execve)
++ENDPROC(stub_x32_execve)
+ .align 8
+ GLOBAL(stub_x32_execveat)
+ GLOBAL(stub32_execveat)
+ call compat_sys_execveat
+ jmp return_from_execve
+-END(stub32_execveat)
+-END(stub_x32_execveat)
++ENDPROC(stub32_execveat)
++ENDPROC(stub_x32_execveat)
+ #endif
+
+ /*
+@@ -488,7 +909,7 @@ ENTRY(stub_x32_rt_sigreturn)
+ SAVE_EXTRA_REGS 8
+ call sys32_x32_rt_sigreturn
+ jmp return_from_stub
+-END(stub_x32_rt_sigreturn)
++ENDPROC(stub_x32_rt_sigreturn)
+ #endif
+
+ /*
+@@ -527,7 +948,7 @@ ENTRY(ret_from_fork)
+ movl $0, RAX(%rsp)
+ RESTORE_EXTRA_REGS
+ jmp int_ret_from_sys_call
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Build the entry stubs with some assembler magic.
+@@ -542,7 +963,7 @@ ENTRY(irq_entries_start)
+ jmp common_interrupt
+ .align 8
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ /*
+ * Interrupt entry/exit.
+@@ -555,21 +976,13 @@ END(irq_entries_start)
+ /* 0(%rsp): ~(interrupt number) */
+ .macro interrupt func
+ cld
+- /*
+- * Since nothing in interrupt handling code touches r12...r15 members
+- * of "struct pt_regs", and since interrupts can nest, we can save
+- * four stack slots and simultaneously provide
+- * an unwind-friendly stack layout by saving "truncated" pt_regs
+- * exactly up to rbp slot, without these members.
+- */
+- ALLOC_PT_GPREGS_ON_STACK -RBP
+- SAVE_C_REGS -RBP
+- /* this goes to 0(%rsp) for unwinder, not for saving the value: */
+- SAVE_EXTRA_REGS_RBP -RBP
++ ALLOC_PT_GPREGS_ON_STACK
++ SAVE_C_REGS
++ SAVE_EXTRA_REGS
+
+- leaq -RBP(%rsp), %rdi /* arg1 for \func (pointer to pt_regs) */
++ movq %rsp, %rdi /* arg1 for \func (pointer to pt_regs) */
+
+- testb $3, CS-RBP(%rsp)
++ testb $3, CS(%rsp)
+ jz 1f
+ SWAPGS
+ 1:
+@@ -584,6 +997,18 @@ END(irq_entries_start)
+ incl PER_CPU_VAR(irq_count)
+ cmovzq PER_CPU_VAR(irq_stack_ptr), %rsp
+ pushq %rsi
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
++
+ /* We entered an interrupt context - irqs are off: */
+ TRACE_IRQS_OFF
+
+@@ -608,7 +1033,7 @@ ret_from_intr:
+ /* Restore saved previous stack */
+ popq %rsi
+ /* return code expects complete pt_regs - adjust rsp accordingly: */
+- leaq -RBP(%rsi), %rsp
++ movq %rsi, %rsp
+
+ testb $3, CS(%rsp)
+ jz retint_kernel
+@@ -630,6 +1055,8 @@ retint_swapgs: /* return to user-space */
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
++# pax_erase_kstack
+ TRACE_IRQS_IRETQ
+
+ SWAPGS
+@@ -648,6 +1075,21 @@ retint_kernel:
+ jmp 0b
+ 1:
+ #endif
++
++ pax_exit_kernel
++
++#if defined(CONFIG_EFI) && defined(CONFIG_PAX_KERNEXEC)
++ /* This is a quirk to allow IRQs/NMIs/MCEs during early EFI setup,
++ * namely calling EFI runtime services with a phys mapping. We're
++ * starting off with NOPs and patch in the real instrumentation
++ * (BTS/OR) before starting any userland process; even before starting
++ * up the APs.
++ */
++ ALTERNATIVE "", "pax_force_retaddr 16*8", X86_FEATURE_ALWAYS
++#else
++ pax_force_retaddr RIP
++#endif
++
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -689,15 +1131,15 @@ native_irq_return_ldt:
+ SWAPGS
+ movq PER_CPU_VAR(espfix_waddr), %rdi
+ movq %rax, (0*8)(%rdi) /* RAX */
+- movq (2*8)(%rsp), %rax /* RIP */
++ movq (2*8 + RIP-RIP)(%rsp), %rax /* RIP */
+ movq %rax, (1*8)(%rdi)
+- movq (3*8)(%rsp), %rax /* CS */
++ movq (2*8 + CS-RIP)(%rsp), %rax /* CS */
+ movq %rax, (2*8)(%rdi)
+- movq (4*8)(%rsp), %rax /* RFLAGS */
++ movq (2*8 + EFLAGS-RIP)(%rsp), %rax /* RFLAGS */
+ movq %rax, (3*8)(%rdi)
+- movq (6*8)(%rsp), %rax /* SS */
++ movq (2*8 + SS-RIP)(%rsp), %rax /* SS */
+ movq %rax, (5*8)(%rdi)
+- movq (5*8)(%rsp), %rax /* RSP */
++ movq (2*8 + RSP-RIP)(%rsp), %rax /* RSP */
+ movq %rax, (4*8)(%rdi)
+ andl $0xffff0000, %eax
+ popq %rdi
+@@ -738,7 +1180,7 @@ retint_signal:
+ GET_THREAD_INFO(%rcx)
+ jmp retint_with_reschedule
+
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+
+ /*
+ * APIC interrupts.
+@@ -750,7 +1192,7 @@ ENTRY(\sym)
+ .Lcommon_\sym:
+ interrupt \do_sym
+ jmp ret_from_intr
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_TRACING
+@@ -815,7 +1257,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
+ /*
+ * Exception entry points.
+ */
+-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
++#define CPU_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
+
+ .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+ ENTRY(\sym)
+@@ -862,6 +1304,12 @@ ENTRY(\sym)
+ .endif
+
+ .if \shift_ist != -1
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
++ lea cpu_tss(%r13), %r13
++#else
++ lea cpu_tss(%rip), %r13
++#endif
+ subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+ .endif
+
+@@ -905,7 +1353,7 @@ ENTRY(\sym)
+
+ jmp error_exit /* %ebx: no swapgs flag */
+ .endif
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_TRACING
+@@ -947,8 +1395,9 @@ gs_change:
+ 2: mfence /* workaround */
+ SWAPGS
+ popfq
++ pax_force_retaddr
+ ret
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ _ASM_EXTABLE(gs_change, bad_gs)
+ .section .fixup, "ax"
+@@ -970,8 +1419,9 @@ ENTRY(do_softirq_own_stack)
+ call __do_softirq
+ leaveq
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+-END(do_softirq_own_stack)
++ENDPROC(do_softirq_own_stack)
+
+ #ifdef CONFIG_XEN
+ idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+@@ -1007,7 +1457,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
+ call xen_maybe_preempt_hcall
+ #endif
+ jmp error_exit
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1052,7 +1502,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_C_REGS
+ SAVE_EXTRA_REGS
+ jmp error_exit
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+ xen_hvm_callback_vector xen_evtchn_do_upcall
+@@ -1101,8 +1551,36 @@ ENTRY(paranoid_entry)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx, %ebx
+-1: ret
+-END(paranoid_entry)
++1:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS+8(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
++ pax_force_retaddr
++ ret
++ENDPROC(paranoid_entry)
++
++ENTRY(paranoid_entry_nmi)
++ cld
++ SAVE_C_REGS 8
++ SAVE_EXTRA_REGS 8
++ movl $1, %ebx
++ movl $MSR_GS_BASE, %ecx
++ rdmsr
++ testl %edx, %edx
++ js 1f /* negative -> in kernel */
++ SWAPGS
++ xorl %ebx, %ebx
++1: pax_enter_kernel_nmi
++ pax_force_retaddr
++ ret
++ENDPROC(paranoid_entry_nmi)
+
+ /*
+ * "Paranoid" exit path from exception stack. This is invoked
+@@ -1119,19 +1597,26 @@ END(paranoid_entry)
+ ENTRY(paranoid_exit)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF_DEBUG
+- testl %ebx, %ebx /* swapgs needed? */
++ testl $1, %ebx /* swapgs needed? */
+ jnz paranoid_exit_no_swapgs
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ
+ SWAPGS_UNSAFE_STACK
+ jmp paranoid_exit_restore
+ paranoid_exit_no_swapgs:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ_DEBUG
+ paranoid_exit_restore:
+ RESTORE_EXTRA_REGS
+ RESTORE_C_REGS
+ REMOVE_PT_GPREGS_FROM_STACK 8
++ pax_force_retaddr_bts
+ INTERRUPT_RETURN
+-END(paranoid_exit)
++ENDPROC(paranoid_exit)
+
+ /*
+ * Save all registers in pt_regs, and switch gs if needed.
+@@ -1149,7 +1634,18 @@ ENTRY(error_entry)
+ SWAPGS
+
+ error_entry_done:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS+8(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ TRACE_IRQS_OFF
++ pax_force_retaddr
+ ret
+
+ /*
+@@ -1199,7 +1695,7 @@ error_bad_iret:
+ mov %rax, %rsp
+ decl %ebx
+ jmp error_entry_done
+-END(error_entry)
++ENDPROC(error_entry)
+
+
+ /*
+@@ -1212,10 +1708,10 @@ ENTRY(error_exit)
+ RESTORE_EXTRA_REGS
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl %eax, %eax
++ testl $1, %eax
+ jnz retint_kernel
+ jmp retint_user
+-END(error_exit)
++ENDPROC(error_exit)
+
+ /* Runs on exception stack */
+ ENTRY(nmi)
+@@ -1269,6 +1765,8 @@ ENTRY(nmi)
+ * other IST entries.
+ */
+
++ ASM_CLAC
++
+ /* Use %rdx as our temp variable throughout */
+ pushq %rdx
+
+@@ -1312,6 +1810,12 @@ ENTRY(nmi)
+ pushq %r14 /* pt_regs->r14 */
+ pushq %r15 /* pt_regs->r15 */
+
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ xorl %ebx, %ebx
++#endif
++
++ pax_enter_kernel_nmi
++
+ /*
+ * At this point we no longer need to worry about stack damage
+ * due to nesting -- we're on the normal thread stack and we're
+@@ -1322,12 +1826,19 @@ ENTRY(nmi)
+ movq $-1, %rsi
+ call do_nmi
+
++ pax_exit_kernel_nmi
++
+ /*
+ * Return back to user mode. We must *not* do the normal exit
+ * work, because we don't want to enable interrupts. Fortunately,
+ * do_nmi doesn't modify pt_regs.
+ */
+ SWAPGS
++
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ movq RBX(%rsp), %rbx
++#endif
++
+ jmp restore_c_regs_and_iret
+
+ .Lnmi_from_kernel:
+@@ -1449,6 +1960,7 @@ nested_nmi_out:
+ popq %rdx
+
+ /* We are returning to kernel mode, so this cannot result in a fault. */
++# pax_force_retaddr_bts
+ INTERRUPT_RETURN
+
+ first_nmi:
+@@ -1522,20 +2034,22 @@ end_repeat_nmi:
+ ALLOC_PT_GPREGS_ON_STACK
+
+ /*
+- * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
++ * Use paranoid_entry_nmi to handle SWAPGS, but no need to use paranoid_exit
+ * as we should not be calling schedule in NMI context.
+ * Even with normal interrupts enabled. An NMI should not be
+ * setting NEED_RESCHED or anything that normal interrupts and
+ * exceptions might do.
+ */
+- call paranoid_entry
++ call paranoid_entry_nmi
+
+ /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ movq %rsp, %rdi
+ movq $-1, %rsi
+ call do_nmi
+
+- testl %ebx, %ebx /* swapgs needed? */
++ pax_exit_kernel_nmi
++
++ testl $1, %ebx /* swapgs needed? */
+ jnz nmi_restore
+ nmi_swapgs:
+ SWAPGS_UNSAFE_STACK
+@@ -1546,6 +2060,8 @@ nmi_restore:
+ /* Point RSP at the "iret" frame. */
+ REMOVE_PT_GPREGS_FROM_STACK 6*8
+
++ pax_force_retaddr_bts
++
+ /*
+ * Clear "NMI executing". Set DF first so that we can easily
+ * distinguish the remaining code between here and IRET from
+@@ -1563,9 +2079,9 @@ nmi_restore:
+ * mode, so this cannot result in a fault.
+ */
+ INTERRUPT_RETURN
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(ignore_sysret)
+ mov $-ENOSYS, %eax
+ sysret
+-END(ignore_sysret)
++ENDPROC(ignore_sysret)
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index a7e257d..3a6ad23 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -13,8 +13,10 @@
+ #include <asm/irqflags.h>
+ #include <asm/asm.h>
+ #include <asm/smap.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
+ #include <linux/err.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -35,6 +37,32 @@ ENTRY(native_usergs_sysret32)
+ ENDPROC(native_usergs_sysret32)
+ #endif
+
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++ .macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++ .endm
++
+ /*
+ * 32-bit SYSENTER instruction entry.
+ *
+@@ -65,20 +93,21 @@ ENTRY(entry_SYSENTER_compat)
+ */
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+- ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %ebp, %ebp
+ movl %eax, %eax
+
+- movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
++ GET_THREAD_INFO(%r11)
++ movl TI_sysenter_return(%r11), %r11d
+
+ /* Construct struct pt_regs on stack */
+ pushq $__USER32_DS /* pt_regs->ss */
+ pushq %rbp /* pt_regs->sp */
+ pushfq /* pt_regs->flags */
++ orl $X86_EFLAGS_IF,(%rsp)
+ pushq $__USER32_CS /* pt_regs->cs */
+- pushq %r10 /* pt_regs->ip = thread_info->sysenter_return */
++ pushq %r11 /* pt_regs->ip = thread_info->sysenter_return */
+ pushq %rax /* pt_regs->orig_ax */
+ pushq %rdi /* pt_regs->di */
+ pushq %rsi /* pt_regs->si */
+@@ -88,15 +117,37 @@ ENTRY(entry_SYSENTER_compat)
+ cld
+ sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12(%rsp)
++#endif
++
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
+ /*
+ * no need to do an access_ok check here because rbp has been
+ * 32-bit zero extended
+ */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ addq pax_user_shadow_base, %rbp
++ ASM_PAX_OPEN_USERLAND
++#endif
++
+ ASM_STAC
+ 1: movl (%rbp), %ebp
+ _ASM_EXTABLE(1b, ia32_badarg)
+ ASM_CLAC
+
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ASM_PAX_CLOSE_USERLAND
++#endif
++
+ /*
+ * Sysenter doesn't filter flags, so we need to clear NT
+ * ourselves. To save a few cycles, we can check whether
+@@ -106,8 +157,9 @@ ENTRY(entry_SYSENTER_compat)
+ jnz sysenter_fix_flags
+ sysenter_flags_fixed:
+
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT, TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%r11)
+ jnz sysenter_tracesys
+
+ sysenter_do_call:
+@@ -123,9 +175,10 @@ sysenter_dispatch:
+ call *ia32_sys_call_table(, %rax, 8)
+ movq %rax, RAX(%rsp)
+ 1:
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%r11)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
+ /*
+@@ -138,7 +191,9 @@ sysexit_from_sys_call:
+ * This code path is still called 'sysexit' because it pairs
+ * with 'sysenter' and it uses the SYSENTER calling convention.
+ */
+- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT, TI_status(%r11)
+ movl RIP(%rsp), %ecx /* User %eip */
+ movq RAX(%rsp), %rax
+ RESTORE_RSI_RDI
+@@ -194,6 +249,8 @@ sysexit_from_sys_call:
+ movl %eax, %edi /* arg1 (RDI) <= syscall number (EAX) */
+ call __audit_syscall_entry
+
++ pax_erase_kstack
++
+ /*
+ * We are going to jump back to the syscall dispatch code.
+ * Prepare syscall args as required by the 64-bit C ABI.
+@@ -209,7 +266,7 @@ sysexit_from_sys_call:
+ .endm
+
+ .macro auditsys_exit exit
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+ ENABLE_INTERRUPTS(CLBR_NONE)
+@@ -220,10 +277,11 @@ sysexit_from_sys_call:
+ 1: setbe %al /* 1 if error, 0 if not */
+ movzbl %al, %edi /* zero-extend that into %edi */
+ call __audit_syscall_exit
++ GET_THREAD_INFO(%r11)
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl %edi, TI_flags(%r11)
+ jz \exit
+ xorl %eax, %eax /* Do not leak kernel information */
+ movq %rax, R11(%rsp)
+@@ -249,7 +307,7 @@ sysenter_fix_flags:
+
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
+ jz sysenter_auditsys
+ #endif
+ SAVE_EXTRA_REGS
+@@ -269,6 +327,9 @@ sysenter_tracesys:
+ movl %eax, %eax /* zero extension */
+
+ RESTORE_EXTRA_REGS
++
++ pax_erase_kstack
++
+ jmp sysenter_do_call
+ ENDPROC(entry_SYSENTER_compat)
+
+@@ -311,7 +372,6 @@ ENTRY(entry_SYSCALL_compat)
+ SWAPGS_UNSAFE_STACK
+ movl %esp, %r8d
+ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+- ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax, %eax
+@@ -331,16 +391,41 @@ ENTRY(entry_SYSCALL_compat)
+ pushq $-ENOSYS /* pt_regs->ax */
+ sub $(10*8), %rsp /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12(%rsp)
++#endif
++
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
+ /*
+ * No need to do an access_ok check here because r8 has been
+ * 32-bit zero extended:
+ */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ASM_PAX_OPEN_USERLAND
++ movq pax_user_shadow_base, %r8
++ addq RSP(%rsp), %r8
++#endif
++
+ ASM_STAC
+ 1: movl (%r8), %r9d
+ _ASM_EXTABLE(1b, ia32_badarg)
+ ASM_CLAC
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ASM_PAX_CLOSE_USERLAND
++#endif
++
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ jnz cstar_tracesys
+
+ cstar_do_call:
+@@ -358,13 +443,16 @@ cstar_dispatch:
+ call *ia32_sys_call_table(, %rax, 8)
+ movq %rax, RAX(%rsp)
+ 1:
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $_TIF_ALLWORK_MASK, TI_flags(%r11)
+ jnz sysretl_audit
+
+ sysretl_from_sys_call:
+- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT, TI_status(%r11)
+ RESTORE_RSI_RDI_RDX
+ movl RIP(%rsp), %ecx
+ movl EFLAGS(%rsp), %r11d
+@@ -403,7 +491,7 @@ sysretl_audit:
+
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%r11)
+ jz cstar_auditsys
+ #endif
+ xchgl %r9d, %ebp
+@@ -426,11 +514,19 @@ cstar_tracesys:
+
+ RESTORE_EXTRA_REGS
+ xchgl %ebp, %r9d
++
++ pax_erase_kstack
++
+ jmp cstar_do_call
+ END(entry_SYSCALL_compat)
+
+ ia32_badarg:
+ ASM_CLAC
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ ASM_PAX_CLOSE_USERLAND
++#endif
++
+ movq $-EFAULT, RAX(%rsp)
+ ia32_ret_from_sys_call:
+ xorl %eax, %eax /* Do not leak kernel information */
+@@ -462,14 +558,8 @@ ia32_ret_from_sys_call:
+ */
+
+ ENTRY(entry_INT80_compat)
+- /*
+- * Interrupts are off on entry.
+- * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+- * it is too small to ever cause noticeable irq latency.
+- */
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
+- ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /* Zero-extending 32-bit regs, do not remove */
+ movl %eax, %eax
+@@ -488,8 +578,26 @@ ENTRY(entry_INT80_compat)
+ cld
+ sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+
+- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ movq %r12, R12(%rsp)
++#endif
++
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * Interrupts are off on entry.
++ * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
++ * it is too small to ever cause noticeable irq latency.
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT, TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%r11)
+ jnz ia32_tracesys
+
+ ia32_do_call:
+@@ -524,6 +632,9 @@ ia32_tracesys:
+ movl RDI(%rsp), %edi
+ movl %eax, %eax /* zero extension */
+ RESTORE_EXTRA_REGS
++
++ pax_erase_kstack
++
+ jmp ia32_do_call
+ END(entry_INT80_compat)
+
+diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
+index efb2b93..8a9cb8e 100644
+--- a/arch/x86/entry/thunk_64.S
++++ b/arch/x86/entry/thunk_64.S
+@@ -8,6 +8,7 @@
+ #include <linux/linkage.h>
+ #include "calling.h"
+ #include <asm/asm.h>
++#include <asm/alternative-asm.h>
+
+ /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+ .macro THUNK name, func, put_ret_addr_in_rdi=0
+@@ -62,6 +63,7 @@ restore:
+ popq %rdx
+ popq %rsi
+ popq %rdi
++ pax_force_retaddr
+ ret
+ _ASM_NOKPROBE(restore)
+ #endif
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index e970320..c006fea 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -175,7 +175,7 @@ quiet_cmd_vdso = VDSO $@
+ -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
++VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+ $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
+index 0224987..8deb742 100644
+--- a/arch/x86/entry/vdso/vdso2c.h
++++ b/arch/x86/entry/vdso/vdso2c.h
+@@ -12,7 +12,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ unsigned long load_size = -1; /* Work around bogus warning */
+ unsigned long mapping_size;
+ ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+- int i;
++ unsigned int i;
+ unsigned long j;
+ ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+ *alt_sec = NULL;
+@@ -83,7 +83,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+ for (i = 0;
+ i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
+ i++) {
+- int k;
++ unsigned int k;
+ ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
+ GET_LE(&symtab_hdr->sh_entsize) * i;
+ const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 1c9f750..cfddb1a 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -19,10 +19,7 @@
+ #include <asm/page.h>
+ #include <asm/hpet.h>
+ #include <asm/desc.h>
+-
+-#if defined(CONFIG_X86_64)
+-unsigned int __read_mostly vdso64_enabled = 1;
+-#endif
++#include <asm/mman.h>
+
+ void __init init_vdso_image(const struct vdso_image *image)
+ {
+@@ -101,6 +98,11 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ .pages = no_pages,
+ };
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ calculate_addr = false;
++#endif
++
+ if (calculate_addr) {
+ addr = vdso_addr(current->mm->start_stack,
+ image->size - image->sym_vvar_start);
+@@ -111,14 +113,14 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ down_write(&mm->mmap_sem);
+
+ addr = get_unmapped_area(NULL, addr,
+- image->size - image->sym_vvar_start, 0, 0);
++ image->size - image->sym_vvar_start, 0, MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ text_start = addr - image->sym_vvar_start;
+- current->mm->context.vdso = (void __user *)text_start;
++ mm->context.vdso = text_start;
+
+ /*
+ * MAYWRITE to allow gdb to COW and set breakpoints
+@@ -163,15 +165,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+ hpet_address >> PAGE_SHIFT,
+ PAGE_SIZE,
+ pgprot_noncached(PAGE_READONLY));
+-
+- if (ret)
+- goto up_fail;
+ }
+ #endif
+
+ up_fail:
+ if (ret)
+- current->mm->context.vdso = NULL;
++ current->mm->context.vdso = 0;
+
+ up_write(&mm->mmap_sem);
+ return ret;
+@@ -191,8 +190,8 @@ static int load_vdso32(void)
+
+ if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+ current_thread_info()->sysenter_return =
+- current->mm->context.vdso +
+- selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
++ (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym_VDSO32_SYSENTER_RETURN);
+
+ return 0;
+ }
+@@ -201,9 +200,6 @@ static int load_vdso32(void)
+ #ifdef CONFIG_X86_64
+ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ {
+- if (!vdso64_enabled)
+- return 0;
+-
+ return map_vdso(&vdso_image_64, true);
+ }
+
+@@ -212,12 +208,8 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+ {
+ #ifdef CONFIG_X86_X32_ABI
+- if (test_thread_flag(TIF_X32)) {
+- if (!vdso64_enabled)
+- return 0;
+-
++ if (test_thread_flag(TIF_X32))
+ return map_vdso(&vdso_image_x32, true);
+- }
+ #endif
+
+ return load_vdso32();
+@@ -231,15 +223,6 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ #endif
+
+ #ifdef CONFIG_X86_64
+-static __init int vdso_setup(char *s)
+-{
+- vdso64_enabled = simple_strtoul(s, NULL, 0);
+- return 0;
+-}
+-__setup("vdso=", vdso_setup);
+-#endif
+-
+-#ifdef CONFIG_X86_64
+ static void vgetcpu_cpu_init(void *arg)
+ {
+ int cpu = smp_processor_id();
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 2dcc6ff..082dc7a 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -38,15 +38,13 @@
+ #define CREATE_TRACE_POINTS
+ #include "vsyscall_trace.h"
+
+-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
++static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
+
+ static int __init vsyscall_setup(char *str)
+ {
+ if (str) {
+ if (!strcmp("emulate", str))
+ vsyscall_mode = EMULATE;
+- else if (!strcmp("native", str))
+- vsyscall_mode = NATIVE;
+ else if (!strcmp("none", str))
+ vsyscall_mode = NONE;
+ else
+@@ -264,8 +262,7 @@ do_ret:
+ return true;
+
+ sigsegv:
+- force_sig(SIGSEGV, current);
+- return true;
++ do_group_exit(SIGKILL);
+ }
+
+ /*
+@@ -283,8 +280,8 @@ static struct vm_operations_struct gate_vma_ops = {
+ static struct vm_area_struct gate_vma = {
+ .vm_start = VSYSCALL_ADDR,
+ .vm_end = VSYSCALL_ADDR + PAGE_SIZE,
+- .vm_page_prot = PAGE_READONLY_EXEC,
+- .vm_flags = VM_READ | VM_EXEC,
++ .vm_page_prot = PAGE_READONLY,
++ .vm_flags = VM_READ,
+ .vm_ops = &gate_vma_ops,
+ };
+
+@@ -325,10 +322,7 @@ void __init map_vsyscall(void)
+ unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+
+ if (vsyscall_mode != NONE)
+- __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+- vsyscall_mode == NATIVE
+- ? PAGE_KERNEL_VSYSCALL
+- : PAGE_KERNEL_VVAR);
++ __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
+
+ BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+ (unsigned long)VSYSCALL_ADDR);
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index ae6aad1..719d6d9 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -153,6 +153,8 @@ static int aout_core_dump(struct coredump_params *cprm)
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index ae3a29a..cea65e9 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -216,7 +216,7 @@ asmlinkage long sys32_sigreturn(void)
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_COMPAT_NSIG_WORDS > 1
+ && __copy_from_user((((char *) &set.sig) + 4),
+- &frame->extramask,
++ frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+@@ -336,7 +336,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -381,10 +381,10 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+ } else {
+ /* Return stub is in 32bit vsyscall page */
+ if (current->mm->context.vdso)
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_sigreturn;
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_sigreturn);
+ else
+- restorer = &frame->retcode;
++ restorer = frame->retcode;
+ }
+
+ put_user_try {
+@@ -394,7 +394,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -436,7 +436,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
+@@ -459,16 +459,19 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+
+ if (ksig->ka.sa.sa_flags & SA_RESTORER)
+ restorer = ksig->ka.sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = (void __force_user *)(current->mm->context.vdso +
++ selected_vdso32->sym___kernel_rt_sigreturn);
+ else
+- restorer = current->mm->context.vdso +
+- selected_vdso32->sym___kernel_rt_sigreturn;
++ restorer = frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
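
The get_sigframe() hunk above changes the rounding expression from ((sp + 4) & -16ul) - 4 to ((sp - 12) & -16ul) - 4. Both forms satisfy the i386 ABI rule quoted in the comment (on function entry ((sp + 4) & 15) == 0); the patched form additionally keeps the result strictly below the incoming sp, whereas the old expression could return sp itself when sp is congruent to 12 mod 16. A small stand-alone check of those two properties, as an illustration only and not part of the patch:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* Walk sp over all 16 residues and confirm the patched expression
     * keeps the i386 ABI alignment rule and stays below sp. */
    for (unsigned long sp = 0xffffd000UL; sp > 0xffffd000UL - 16; sp--) {
        unsigned long r = ((sp - 12) & -16ul) - 4;
        assert(((r + 4) & 15) == 0);   /* ABI: ((sp + 4) & 15) == 0 at entry */
        assert(r < sp);                /* frame never placed at or above sp */
    }
    printf("alignment check passed\n");
    return 0;
}
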
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index 719cd70..72af944 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -49,18 +49,26 @@
+
+ #define AA(__x) ((unsigned long)(__x))
+
++static inline loff_t compose_loff(unsigned int high, unsigned int low)
++{
++ loff_t retval = low;
++
++ BUILD_BUG_ON(sizeof retval != sizeof low + sizeof high);
++ __builtin_memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high);
++ return retval;
++}
+
+ asmlinkage long sys32_truncate64(const char __user *filename,
+- unsigned long offset_low,
+- unsigned long offset_high)
++ unsigned int offset_low,
++ unsigned int offset_high)
+ {
+- return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
++ return sys_truncate(filename, compose_loff(offset_high, offset_low));
+ }
+
+-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+- unsigned long offset_high)
++asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned int offset_low,
++ unsigned int offset_high)
+ {
+- return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
++ return sys_ftruncate(fd, ((unsigned long) offset_high << 32) | offset_low);
+ }
+
+ /*
+@@ -69,8 +77,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+ */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
+ SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
+ SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
+ if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -196,29 +204,29 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
+ __u32 len_low, __u32 len_high, int advice)
+ {
+ return sys_fadvise64_64(fd,
+- (((u64)offset_high)<<32) | offset_low,
+- (((u64)len_high)<<32) | len_low,
++ compose_loff(offset_high, offset_low),
++ compose_loff(len_high, len_low),
+ advice);
+ }
+
+ asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
+ size_t count)
+ {
+- return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
++ return sys_readahead(fd, compose_loff(off_hi, off_lo), count);
+ }
+
+ asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
+ unsigned n_low, unsigned n_hi, int flags)
+ {
+ return sys_sync_file_range(fd,
+- ((u64)off_hi << 32) | off_low,
+- ((u64)n_hi << 32) | n_low, flags);
++ compose_loff(off_hi, off_low),
++ compose_loff(n_hi, n_low), flags);
+ }
+
+ asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
+- size_t len, int advice)
++ int len, int advice)
+ {
+- return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
++ return sys_fadvise64_64(fd, compose_loff(offset_hi, offset_lo),
+ len, advice);
+ }
+
+@@ -226,6 +234,6 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
+ unsigned offset_hi, unsigned len_lo,
+ unsigned len_hi)
+ {
+- return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
+- ((u64)len_hi << 32) | len_lo);
++ return sys_fallocate(fd, mode, compose_loff(offset_hi, offset_lo),
++ compose_loff(len_hi, len_lo));
+ }
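
compose_loff() introduced above rebuilds a 64-bit loff_t from two 32-bit halves by byte-copying the high word into the upper half instead of the usual shift-and-OR. On a little-endian x86 build the two are equivalent; a quick user-space check of that equivalence, illustrative only and assuming little-endian byte order, with memcpy standing in for __builtin_memcpy and int64_t for loff_t:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Mirror of compose_loff() from the hunk above. */
static int64_t compose_loff(uint32_t high, uint32_t low)
{
    int64_t retval = low;

    _Static_assert(sizeof(int64_t) == sizeof(uint32_t) + sizeof(uint32_t), "size mismatch");
    memcpy((unsigned char *)&retval + sizeof low, &high, sizeof high);
    return retval;
}

int main(void)
{
    uint32_t high = 0x12345678, low = 0x9abcdef0;

    /* little-endian: copying high into the upper half equals the
     * conventional ((u64)high << 32) | low composition */
    assert(compose_loff(high, low) == (int64_t)(((uint64_t)high << 32) | low));
    return 0;
}
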
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index e7636ba..e1fb78a 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -18,6 +18,45 @@
+ .endm
+ #endif
+
++#ifdef KERNEXEC_PLUGIN
++ .macro pax_force_retaddr_bts rip=0
++ btsq $63,\rip(%rsp)
++ .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ .macro pax_force_retaddr rip=0, reload=0
++ btsq $63,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ btsq $63,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .macro pax_force_retaddr rip=0, reload=0
++ .if \reload
++ pax_set_fptr_mask
++ .endif
++ orq %r12,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ orq %r12,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ movabs $0x8000000000000000,%r12
++ .endm
++#endif
++#else
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++ .macro pax_force_fptr ptr
++ .endm
++ .macro pax_force_retaddr_bts rip=0
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++
+ /*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+@@ -50,7 +89,7 @@
+ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
+ .popsection
+
+- .pushsection .altinstr_replacement,"ax"
++ .pushsection .altinstr_replacement,"a"
+ 143:
+ \newinstr
+ 144:
+@@ -86,7 +125,7 @@
+ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
+ .popsection
+
+- .pushsection .altinstr_replacement,"ax"
++ .pushsection .altinstr_replacement,"a"
+ 143:
+ \newinstr1
+ 144:
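
The pax_force_retaddr macros added above implement the KERNEXEC return-address hardening: the BTS variant sets bit 63 of the saved return address directly, and the OR variant does the same through %r12, which pax_set_fptr_mask preloads with 0x8000000000000000. As I read it, legitimate kernel return addresses already have bit 63 set and are unaffected, while an injected userland address becomes non-canonical and faults on return. A tiny arithmetic illustration of that property, my sketch rather than part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Canonical x86-64 addresses have bits 63..47 all equal. */
static int is_canonical(uint64_t addr)
{
    int64_t top = (int64_t)addr >> 47;
    return top == 0 || top == -1;
}

int main(void)
{
    uint64_t user_ret   = 0x00007fffdeadbeefULL;   /* typical userland address */
    uint64_t kernel_ret = 0xffffffff81000000ULL;   /* typical kernel text address */
    uint64_t mask       = 1ULL << 63;              /* %r12 in the OR method */

    printf("user   | mask -> %#llx canonical=%d\n",
           (unsigned long long)(user_ret | mask), is_canonical(user_ret | mask));
    printf("kernel | mask -> %#llx canonical=%d\n",
           (unsigned long long)(kernel_ret | mask), is_canonical(kernel_ret | mask));
    return 0;
}
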
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index 7bfc85b..65d1ec4 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -136,7 +136,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".popsection\n" \
+- ".pushsection .altinstr_replacement, \"ax\"\n" \
++ ".pushsection .altinstr_replacement, \"a\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".popsection"
+
+@@ -146,7 +146,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".popsection\n" \
+- ".pushsection .altinstr_replacement, \"ax\"\n" \
++ ".pushsection .altinstr_replacement, \"a\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".popsection"
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index c839363..b9a8c43 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 20370c6..a2eb9b0 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
+index e916895..42d729d 100644
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -28,6 +28,17 @@ static __always_inline int atomic_read(const atomic_t *v)
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static __always_inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return ACCESS_ONCE((v)->counter);
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -40,6 +51,18 @@ static __always_inline void atomic_set(atomic_t *v, int i)
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static __always_inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -48,7 +71,29 @@ static __always_inline void atomic_set(atomic_t *v, int i)
+ */
+ static __always_inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static __always_inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -62,7 +107,29 @@ static __always_inline void atomic_add(int i, atomic_t *v)
+ */
+ static __always_inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static __always_inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -78,7 +145,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
+ */
+ static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
++ GEN_BINARY_RMWcc(LOCK_PREFIX "subl", LOCK_PREFIX "addl", v->counter, "er", i, "%0", "e");
+ }
+
+ /**
+@@ -89,7 +156,27 @@ static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+ */
+ static __always_inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static __always_inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -101,7 +188,27 @@ static __always_inline void atomic_inc(atomic_t *v)
+ */
+ static __always_inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static __always_inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -115,7 +222,7 @@ static __always_inline void atomic_dec(atomic_t *v)
+ */
+ static __always_inline int atomic_dec_and_test(atomic_t *v)
+ {
+- GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
++ GEN_UNARY_RMWcc(LOCK_PREFIX "decl", LOCK_PREFIX "incl", v->counter, "%0", "e");
+ }
+
+ /**
+@@ -128,7 +235,20 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
+ */
+ static __always_inline int atomic_inc_and_test(atomic_t *v)
+ {
+- GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
++ GEN_UNARY_RMWcc(LOCK_PREFIX "incl", LOCK_PREFIX "decl", v->counter, "%0", "e");
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static __always_inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ GEN_UNARY_RMWcc_unchecked(LOCK_PREFIX "incl", v->counter, "%0", "e");
+ }
+
+ /**
+@@ -142,7 +262,7 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
+ */
+ static __always_inline int atomic_add_negative(int i, atomic_t *v)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
++ GEN_BINARY_RMWcc(LOCK_PREFIX "addl", LOCK_PREFIX "subl", v->counter, "er", i, "%0", "s");
+ }
+
+ /**
+@@ -152,7 +272,19 @@ static __always_inline int atomic_add_negative(int i, atomic_t *v)
+ *
+ * Atomically adds @i to @v and returns @i + @v
+ */
+-static __always_inline int atomic_add_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_add_return(int i, atomic_t *v)
++{
++ return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic_add_return_unchecked - add integer and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static __always_inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+ return i + xadd(&v->counter, i);
+ }
+@@ -164,15 +296,24 @@ static __always_inline int atomic_add_return(int i, atomic_t *v)
+ *
+ * Atomically subtracts @i from @v and returns @v - @i
+ */
+-static __always_inline int atomic_sub_return(int i, atomic_t *v)
++static __always_inline int __intentional_overflow(-1) atomic_sub_return(int i, atomic_t *v)
+ {
+ return atomic_add_return(-i, v);
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static __always_inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++static __always_inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static __always_inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+ {
+ return cmpxchg(&v->counter, old, new);
+ }
+@@ -182,6 +323,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * __atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -193,12 +339,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ */
+ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+@@ -207,6 +366,49 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+ }
+
+ /**
++ * atomic_inc_not_zero_hint - increment if not null
++ * @v: pointer of type atomic_t
++ * @hint: probable value of the atomic before the increment
++ *
++ * This version of atomic_inc_not_zero() gives a hint of probable
++ * value of the atomic. This helps processor to not read the memory
++ * before doing the atomic read/modify/write cycle, lowering
++ * number of bus transactions on some arches.
++ *
++ * Returns: 0 if increment was not done, 1 otherwise.
++ */
++#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
++static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
++{
++ int val, c = hint, new;
++
++ /* sanity test, should be removed by compiler if hint is a constant */
++ if (!hint)
++ return __atomic_add_unless(v, 1, 0);
++
++ do {
++ asm volatile("incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c));
++
++ val = atomic_cmpxchg(v, c, new);
++ if (val == c)
++ return 1;
++ c = val;
++ } while (c);
++
++ return 0;
++}
++
++/**
+ * atomic_inc_short - increment of a short integer
+ * @v: pointer to type int
+ *
+@@ -220,14 +422,37 @@ static __always_inline short int atomic_inc_short(short int *v)
+ }
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+- : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ #ifdef CONFIG_X86_32
+ # include <asm/atomic64_32.h>
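
The pattern repeated throughout this atomic.h hunk is the PAX_REFCOUNT check: each lock-prefixed operation is followed by a jno over a fix-up that undoes the operation and raises int $4, which the kernel then handles as an overflow trap, while the *_unchecked variants keep the plain behaviour for counters that are allowed to wrap. A rough single-threaded C model of the intended semantics, not the kernel implementation, which stays atomic and reports through the trap handler:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of a checked add: on signed overflow the add is abandoned and
 * the event is reported, mirroring the jno/int $4 sequence above.
 * Atomicity is deliberately omitted in this sketch. */
static void checked_add(int *counter, int i)
{
    int result;

    if (__builtin_add_overflow(*counter, i, &result)) {
        fprintf(stderr, "refcount overflow caught, value left at %d\n", *counter);
        abort();
    }
    *counter = result;
}

/* Model of the *_unchecked variant: plain wrap-around, as the lock-prefixed
 * addl would produce, done via unsigned arithmetic to stay well defined. */
static void unchecked_add(int *counter, int i)
{
    *counter = (int)((unsigned)*counter + (unsigned)i);
}

int main(void)
{
    int refs = INT_MAX;
    unchecked_add(&refs, 1); /* wraps, tolerated for statistics-style counters */
    refs = INT_MAX;
    checked_add(&refs, 1);   /* trips the overflow check */
    return 0;
}
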
+diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
+index b154de7..3dc335d 100644
+--- a/arch/x86/include/asm/atomic64_32.h
++++ b/arch/x86/include/asm/atomic64_32.h
+@@ -12,6 +12,14 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
+@@ -37,21 +45,31 @@ typedef struct {
+ ATOMIC64_DECL_ONE(sym##_386)
+
+ ATOMIC64_DECL_ONE(add_386);
++ATOMIC64_DECL_ONE(add_unchecked_386);
+ ATOMIC64_DECL_ONE(sub_386);
++ATOMIC64_DECL_ONE(sub_unchecked_386);
+ ATOMIC64_DECL_ONE(inc_386);
++ATOMIC64_DECL_ONE(inc_unchecked_386);
+ ATOMIC64_DECL_ONE(dec_386);
++ATOMIC64_DECL_ONE(dec_unchecked_386);
+ #endif
+
+ #define alternative_atomic64(f, out, in...) \
+ __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
+
+ ATOMIC64_DECL(read);
++ATOMIC64_DECL(read_unchecked);
+ ATOMIC64_DECL(set);
++ATOMIC64_DECL(set_unchecked);
+ ATOMIC64_DECL(xchg);
+ ATOMIC64_DECL(add_return);
++ATOMIC64_DECL(add_return_unchecked);
+ ATOMIC64_DECL(sub_return);
++ATOMIC64_DECL(sub_return_unchecked);
+ ATOMIC64_DECL(inc_return);
++ATOMIC64_DECL(inc_return_unchecked);
+ ATOMIC64_DECL(dec_return);
++ATOMIC64_DECL(dec_return_unchecked);
+ ATOMIC64_DECL(dec_if_positive);
+ ATOMIC64_DECL(inc_not_zero);
+ ATOMIC64_DECL(add_unless);
+@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
+ }
+
+ /**
++ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
++ * @p: pointer to type atomic64_unchecked_t
++ * @o: expected value
++ * @n: new value
++ *
++ * Atomically sets @v to @n if it was equal to @o and returns
++ * the old value.
++ */
++
++static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
++{
++ return cmpxchg64(&v->counter, o, n);
++}
++
++/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @n: value to assign
+@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @n: value to assign
++ *
++ * Atomically sets the value of @v to @n.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
++{
++ unsigned high = (unsigned)(i >> 32);
++ unsigned low = (unsigned)i;
++ alternative_atomic64(set, /* no output */,
++ "S" (v), "b" (low), "c" (high)
++ : "eax", "edx", "memory");
++}
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @v: pointer to type atomic64_t
+ *
+@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v and returns it.
++ */
++static inline long long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ long long r;
++ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
++ return r;
++}
++
++/**
+ * atomic64_add_return - add and return
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
+ return i;
+ }
+
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + *@v
++ */
++static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ alternative_atomic64(add_return_unchecked,
++ ASM_OUTPUT2("+A" (i), "+c" (v)),
++ ASM_NO_INPUT_CLOBBER("memory"));
++ return i;
++}
++
+ /*
+ * Other variants with different arithmetic operators:
+ */
+@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
+ return a;
+ }
+
++static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ long long a;
++ alternative_atomic64(inc_return_unchecked, "=&A" (a),
++ "S" (v) : "memory", "ecx");
++ return a;
++}
++
+ static inline long long atomic64_dec_return(atomic64_t *v)
+ {
+ long long a;
+@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
+ }
+
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
++{
++ __alternative_atomic64(add_unchecked, add_return_unchecked,
++ ASM_OUTPUT2("+A" (i), "+c" (v)),
++ ASM_NO_INPUT_CLOBBER("memory"));
++ return i;
++}
++
++/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @i: integer value to subtract
+ * @v: pointer to type atomic64_t
+diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
+index b965f9e..8e22dd3 100644
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -22,6 +22,18 @@ static inline long atomic64_read(const atomic64_t *v)
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return ACCESS_ONCE((v)->counter);
++}
++
++/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ */
+ static __always_inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static __always_inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -56,7 +102,29 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @i: integer value to subtract
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -72,7 +140,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
+ */
+ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
++ GEN_BINARY_RMWcc(LOCK_PREFIX "subq", LOCK_PREFIX "addq", v->counter, "er", i, "%0", "e");
+ }
+
+ /**
+@@ -83,6 +151,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ */
+ static __always_inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static __always_inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -96,7 +185,28 @@ static __always_inline void atomic64_inc(atomic64_t *v)
+ */
+ static __always_inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static __always_inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -111,7 +221,7 @@ static __always_inline void atomic64_dec(atomic64_t *v)
+ */
+ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+- GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
++ GEN_UNARY_RMWcc(LOCK_PREFIX "decq", LOCK_PREFIX "incq", v->counter, "%0", "e");
+ }
+
+ /**
+@@ -124,7 +234,7 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+ */
+ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+- GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
++ GEN_UNARY_RMWcc(LOCK_PREFIX "incq", LOCK_PREFIX "decq", v->counter, "%0", "e");
+ }
+
+ /**
+@@ -138,7 +248,7 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+ */
+ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
++ GEN_BINARY_RMWcc(LOCK_PREFIX "addq", LOCK_PREFIX "subq", v->counter, "er", i, "%0", "s");
+ }
+
+ /**
+@@ -150,6 +260,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ */
+ static __always_inline long atomic64_add_return(long i, atomic64_t *v)
+ {
++ return i + xadd_check_overflow(&v->counter, i);
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static __always_inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
+ return i + xadd(&v->counter, i);
+ }
+
+@@ -159,6 +281,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -166,6 +292,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+@@ -182,17 +313,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("add %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
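
atomic64_add_unless() above keeps the usual compare-and-swap retry loop; the patch only routes the addition itself through the overflow-checked asm when PAX_REFCOUNT is enabled. A user-space analogue of the loop structure using GCC's __atomic builtins, sketch only and without the overflow check:

#include <stdbool.h>
#include <stdint.h>

/* Add @a to *v unless *v == u; returns true when the add happened.
 * Same retry structure as atomic64_add_unless() in the hunk above. */
static bool add_unless(int64_t *v, int64_t a, int64_t u)
{
    int64_t c = __atomic_load_n(v, __ATOMIC_RELAXED);

    for (;;) {
        if (c == u)
            return false;
        /* on failure, c is refreshed with the current value of *v */
        if (__atomic_compare_exchange_n(v, &c, c + a, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
            return true;
    }
}
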
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index e51a8f8..ee075df 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -57,7 +57,7 @@
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
+@@ -74,7 +74,7 @@ do { \
+ do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+- ACCESS_ONCE(*p) = (v); \
++ ACCESS_ONCE_RW(*p) = (v); \
+ } while (0)
+
+ #define smp_load_acquire(p) \
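
The two barrier.h hunks above only swap ACCESS_ONCE for ACCESS_ONCE_RW inside smp_store_release(); the release/acquire pairing itself is unchanged. For readers less familiar with that pairing, a user-space analogue in C11, my illustration rather than the kernel macros:

#include <stdatomic.h>

static int payload;
static _Atomic int ready;

/* Publisher: the release store orders the payload write before it. */
void producer(void)
{
    payload = 42;
    atomic_store_explicit(&ready, 1, memory_order_release);
}

/* Consumer: an acquire load that observes ready == 1 is guaranteed
 * to also observe payload == 42. */
int consumer(void)
{
    if (atomic_load_explicit(&ready, memory_order_acquire))
        return payload;
    return -1;
}
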
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index cfe3b95..d01b118 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -50,7 +50,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+@@ -203,7 +203,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
+ */
+ static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+ }
+
+ /**
+@@ -249,7 +249,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+ */
+ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+ }
+
+ /**
+@@ -302,7 +302,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+ */
+ static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+ {
+- GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
++ GEN_BINARY_RMWcc_unchecked(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+ }
+
+ static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __ffs(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
+ {
+ asm("rep; bsf %1,%0"
+ : "=r" (word)
+@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+-static inline unsigned long ffz(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
+ {
+ asm("rep; bsf %1,%0"
+ : "=r" (word)
+@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+-static inline unsigned long __fls(unsigned long word)
++static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
+ {
+ asm("bsr %1,%0"
+ : "=r" (word)
+@@ -434,7 +434,7 @@ static inline int ffs(int x)
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+-static inline int fls(int x)
++static inline int __intentional_overflow(-1) fls(int x)
+ {
+ int r;
+
+@@ -476,7 +476,7 @@ static inline int fls(int x)
+ * at position 64.
+ */
+ #ifdef CONFIG_X86_64
+-static __always_inline int fls64(__u64 x)
++static __always_inline __intentional_overflow(-1) int fls64(__u64 x)
+ {
+ int bitpos = -1;
+ /*
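
As far as this bitops.h hunk goes, the changes are PaX bookkeeping: __ffs(), ffz(), __fls(), fls() and fls64() gain the __intentional_overflow(-1) annotation, the test_and_* helpers move to the *_unchecked RMWcc macros, and CONST_MASK_ADDR gains a volatile cast, while the bit-scanning semantics stay as documented. For reference, those documented contracts checked against the matching GCC builtins, illustrative only:

#include <assert.h>

int main(void)
{
    unsigned long w = 0x00f0UL;

    /* __ffs(): index of the least significant set bit (undefined for 0) */
    assert(__builtin_ctzl(w) == 4);

    /* __fls(): index of the most significant set bit (undefined for 0) */
    assert(8 * sizeof(w) - 1 - __builtin_clzl(w) == 7);

    /* ffz(): index of the first zero bit, i.e. __ffs(~word) */
    assert(__builtin_ctzl(~0x0fUL) == 4);
    return 0;
}
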
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 4fa687a..4ca636f 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -6,7 +6,7 @@
+ #include <uapi/asm/boot.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 48f99f1..d78ebf9 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,12 +5,13 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data..read_mostly")))
++#define __read_only __attribute__((__section__(".data..read_only")))
+
+ #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
+-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
++#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
+
+ #ifdef CONFIG_X86_VSMP
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index f50de69..2b0a458 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+
+ might_sleep();
+ stac();
+- ret = csum_partial_copy_generic((__force void *)src, dst,
++ ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ clac();
+
+@@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ stac();
+- ret = csum_partial_copy_generic(src, (__force void *)dst,
++ ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+ clac();
+ return ret;
+diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
+index ad19841..0784041 100644
+--- a/arch/x86/include/asm/cmpxchg.h
++++ b/arch/x86/include/asm/cmpxchg.h
+@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+ extern void __xadd_wrong_size(void)
+ __compiletime_error("Bad argument size for xadd");
++extern void __xadd_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for xadd_check_overflow");
+ extern void __add_wrong_size(void)
+ __compiletime_error("Bad argument size for add");
++extern void __add_check_overflow_wrong_size(void)
++ __compiletime_error("Bad argument size for add_check_overflow");
+
+ /*
+ * Constants for operation sizes. On 32-bit, the 64-bit size it set to
+@@ -67,6 +71,38 @@ extern void __add_wrong_size(void)
+ __ret; \
+ })
+
++#ifdef CONFIG_PAX_REFCOUNT
++#define __xchg_op_check_overflow(ptr, arg, op, lock) \
++ ({ \
++ __typeof__ (*(ptr)) __ret = (arg); \
++ switch (sizeof(*(ptr))) { \
++ case __X86_CASE_L: \
++ asm volatile (lock #op "l %0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ case __X86_CASE_Q: \
++ asm volatile (lock #op "q %q0, %1\n" \
++ "jno 0f\n" \
++ "mov %0,%1\n" \
++ "int $4\n0:\n" \
++ _ASM_EXTABLE(0b, 0b) \
++ : "+r" (__ret), "+m" (*(ptr)) \
++ : : "memory", "cc"); \
++ break; \
++ default: \
++ __ ## op ## _check_overflow_wrong_size(); \
++ } \
++ __ret; \
++ })
++#else
++#define __xchg_op_check_overflow(ptr, arg, op, lock) __xchg_op(ptr, arg, op, lock)
++#endif
++
+ /*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
+ * Since this is generally used to protect other memory information, we
+@@ -165,6 +201,9 @@ extern void __add_wrong_size(void)
+ #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
+ #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
+
++#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
++#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
++
+ #define __add(ptr, inc, lock) \
+ ({ \
+ __typeof__ (*(ptr)) __ret = (inc); \
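
xadd_check_overflow() defined above is what the PAX_REFCOUNT builds of atomic_add_return() and atomic64_add_return() sit on: xadd hands back the value before the addition, the caller reconstructs the new value as i + old, and the jno/int $4 tail reverts and reports a signed overflow. A user-space model of the return-value convention, with the overflow check omitted and assuming GCC's __atomic builtins:

#include <assert.h>
#include <stdint.h>

/* Model of atomic_add_return(): a fetch-and-add primitive returns the
 * old value, and "i + old" is the value after the addition. */
static int32_t add_return(int32_t *counter, int32_t i)
{
    int32_t old = __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
    return i + old;
}

int main(void)
{
    int32_t v = 40;

    assert(add_return(&v, 2) == 42);
    assert(v == 42);
    return 0;
}
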
+diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
+index acdee09..a553db3 100644
+--- a/arch/x86/include/asm/compat.h
++++ b/arch/x86/include/asm/compat.h
+@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
+ typedef u32 compat_uint_t;
+ typedef u32 compat_ulong_t;
+ typedef u64 __attribute__((aligned(4))) compat_u64;
+-typedef u32 compat_uptr_t;
++typedef u32 __user compat_uptr_t;
+
+ struct compat_timespec {
+ compat_time_t tv_sec;
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 3d6606f..300641d 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -214,7 +214,8 @@
+ #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
+ #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
+ #define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
+-
++#define X86_FEATURE_PCIDUDEREF ( 8*32+30) /* PaX PCID based UDEREF */
++#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
+
+ /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+@@ -222,7 +223,7 @@
+ #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+ #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+ #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
++#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Prevention */
+ #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+ #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+ #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+@@ -401,6 +402,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ #define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
+ #define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
+ #define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
++#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
+
+ #if __GNUC__ >= 4
+ extern void warn_pre_alternatives(void);
+@@ -454,7 +456,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+
+ #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+ t_warn:
+- warn_pre_alternatives();
++ if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID && bit != X86_FEATURE_PCIDUDEREF)
++ warn_pre_alternatives();
+ return false;
+ #endif
+
+@@ -475,7 +478,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $1,%0\n"
+ "4:\n"
+ ".previous\n"
+@@ -510,7 +513,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
+ ".previous\n"
+@@ -545,7 +548,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "3: movb $0,%0\n"
+ "4:\n"
+ ".previous\n"
+@@ -560,7 +563,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+ ".section .discard,\"aw\",@progbits\n"
+ " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
+ ".previous\n"
+- ".section .altinstr_replacement,\"ax\"\n"
++ ".section .altinstr_replacement,\"a\"\n"
+ "5: movb $1,%0\n"
+ "6:\n"
+ ".previous\n"
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 4e10d73..7319a47 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+
+ #include <linux/smp.h>
+ #include <linux/percpu.h>
+@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+
+ desc->s = 1;
+ desc->dpl = 0x3;
+@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-extern struct desc_ptr debug_idt_descr;
+-extern gate_desc debug_idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[IDT_ENTRIES];
++extern const struct desc_ptr debug_idt_descr;
++extern gate_desc debug_idt_table[IDT_ENTRIES];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+
+ static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void
+@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
+ default: size = sizeof(*gdt); break;
+ }
+
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+ unsigned int i;
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ /* This intentionally ignores lm, since 32-bit apps don't have that field. */
+@@ -280,7 +293,7 @@ static inline void clear_LDT(void)
+ set_ldt(NULL, 0);
+ }
+
+-static inline unsigned long get_desc_base(const struct desc_struct *desc)
++static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
+ {
+ return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+ }
+@@ -304,7 +317,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ }
+
+ #ifdef CONFIG_X86_64
+-static inline void set_nmi_gate(int gate, void *addr)
++static inline void set_nmi_gate(int gate, const void *addr)
+ {
+ gate_desc s;
+
+@@ -314,14 +327,14 @@ static inline void set_nmi_gate(int gate, void *addr)
+ #endif
+
+ #ifdef CONFIG_TRACING
+-extern struct desc_ptr trace_idt_descr;
+-extern gate_desc trace_idt_table[];
++extern const struct desc_ptr trace_idt_descr;
++extern gate_desc trace_idt_table[IDT_ENTRIES];
+ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+ {
+ write_idt_entry(trace_idt_table, entry, gate);
+ }
+
+-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
++static inline void _trace_set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -341,7 +354,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
+ #define _trace_set_gate(gate, type, addr, dpl, ist, seg)
+ #endif
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -364,14 +377,14 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+ #define set_intr_gate_notrace(n, addr) \
+ do { \
+ BUG_ON((unsigned)n > 0xFF); \
+- _set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0, \
++ _set_gate(n, GATE_INTERRUPT, (const void *)addr, 0, 0, \
+ __KERNEL_CS); \
+ } while (0)
+
+ #define set_intr_gate(n, addr) \
+ do { \
+ set_intr_gate_notrace(n, addr); \
+- _trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
++ _trace_set_gate(n, GATE_INTERRUPT, (const void *)trace_##addr,\
+ 0, 0, __KERNEL_CS); \
+ } while (0)
+
+@@ -399,19 +412,19 @@ static inline void alloc_system_vector(int vector)
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -420,16 +433,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+@@ -501,4 +514,17 @@ static inline void load_current_idt(void)
+ else
+ load_idt((const struct desc_ptr *)&idt_descr);
+ }
++
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index 278441f..b95a174 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
+index ced283a..ffe04cc 100644
+--- a/arch/x86/include/asm/div64.h
++++ b/arch/x86/include/asm/div64.h
+@@ -39,7 +39,7 @@
+ __mod; \
+ })
+
+-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
++static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+ {
+ union {
+ u64 v64;
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index f161c18..97d43e8 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -75,9 +75,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
+
+ #include <asm/vdso.h>
+
+-#ifdef CONFIG_X86_64
+-extern unsigned int vdso64_enabled;
+-#endif
+ #if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+ extern unsigned int vdso32_enabled;
+ #endif
+@@ -250,7 +247,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -299,17 +314,13 @@ do { \
+
+ #define ARCH_DLINFO \
+ do { \
+- if (vdso64_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long __force)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ /* As a historical oddity, the x32 and x86_64 vDSOs are controlled together. */
+ #define ARCH_DLINFO_X32 \
+ do { \
+- if (vdso64_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long __force)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -324,10 +335,10 @@ else \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+- ((unsigned long)current->mm->context.vdso + \
++ (current->mm->context.vdso + \
+ selected_vdso32->sym___kernel_vsyscall)
+
+ struct linux_binprm;
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index 77a99ac..39ff7f5 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -1,6 +1,6 @@
+ #ifndef _ASM_X86_EMERGENCY_RESTART_H
+ #define _ASM_X86_EMERGENCY_RESTART_H
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
+index 1c7eefe..d0e4702 100644
+--- a/arch/x86/include/asm/floppy.h
++++ b/arch/x86/include/asm/floppy.h
+@@ -229,18 +229,18 @@ static struct fd_routine_l {
+ int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
+ } fd_routine[] = {
+ {
+- request_dma,
+- free_dma,
+- get_dma_residue,
+- dma_mem_alloc,
+- hard_dma_setup
++ ._request_dma = request_dma,
++ ._free_dma = free_dma,
++ ._get_dma_residue = get_dma_residue,
++ ._dma_mem_alloc = dma_mem_alloc,
++ ._dma_setup = hard_dma_setup
+ },
+ {
+- vdma_request_dma,
+- vdma_nop,
+- vdma_get_dma_residue,
+- vdma_mem_alloc,
+- vdma_dma_setup
++ ._request_dma = vdma_request_dma,
++ ._free_dma = vdma_nop,
++ ._get_dma_residue = vdma_get_dma_residue,
++ ._dma_mem_alloc = vdma_mem_alloc,
++ ._dma_setup = vdma_dma_setup
+ }
+ };
+
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 3c3550c..995858d 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -97,8 +97,11 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ #define user_insn(insn, output, input...) \
+ ({ \
+ int err; \
++ pax_open_userland(); \
+ asm volatile(ASM_STAC "\n" \
+- "1:" #insn "\n\t" \
++ "1:" \
++ __copyuser_seg \
++ #insn "\n\t" \
+ "2: " ASM_CLAC "\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: movl $-1,%[err]\n" \
+@@ -107,6 +110,7 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err), output \
+ : "0"(0), input); \
++ pax_close_userland(); \
+ err; \
+ })
+
+@@ -186,9 +190,9 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ {
+ if (config_enabled(CONFIG_X86_32))
+- asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
++ asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else if (config_enabled(CONFIG_AS_FXSAVEQ))
+- asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
++ asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
+ else {
+ /* Using "rex64; fxsave %0" is broken because, if the memory
+ * operand uses any extended registers for addressing, a second
+@@ -212,8 +216,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+ * registers.
+ */
+ asm volatile( "rex64/fxsave (%[fx])"
+- : "=m" (fpu->state.fxsave)
+- : [fx] "R" (&fpu->state.fxsave));
++ : "=m" (fpu->state->fxsave)
++ : [fx] "R" (&fpu->state->fxsave));
+ }
+ }
+
+@@ -388,12 +392,16 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+ if (unlikely(err))
+ return -EFAULT;
+
++ pax_open_userland();
+ __asm__ __volatile__(ASM_STAC "\n"
+- "1:"XSAVE"\n"
++ "1:"
++ __copyuser_seg
++ XSAVE"\n"
+ "2: " ASM_CLAC "\n"
+ xstate_fault(err)
+ : "D" (buf), "a" (-1),