Diffstat (limited to '2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201312081751.patch')
-rw-r--r--  2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201312081751.patch  129714
1 file changed, 129714 insertions, 0 deletions
diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201312081751.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201312081751.patch
new file mode 100644
index 0000000..cabb1eb
--- /dev/null
+++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.61-201312081751.patch
@@ -0,0 +1,129714 @@
+diff --git a/Documentation/dontdiff b/Documentation/dontdiff
+index e1efc40..3569a2f 100644
+--- a/Documentation/dontdiff
++++ b/Documentation/dontdiff
+@@ -1,15 +1,20 @@
+ *.a
+ *.aux
+ *.bin
++*.c.[012].*
++*.cis
+ *.cpio
+ *.csp
++*.dbg
+ *.dsp
+ *.dvi
+ *.elf
+ *.eps
+ *.fw
++*.gcno
+ *.gen.S
+ *.gif
++*.gmo
+ *.grep
+ *.grp
+ *.gz
+@@ -38,22 +43,30 @@
+ *.tab.h
+ *.tex
+ *.ver
++*.vim
+ *.xml
+ *_MODULES
++*_reg_safe.h
+ *_vga16.c
+ *~
+ *.9
+ *.9.gz
+-.*
++.[^g]*
++.gen*
+ .mm
+ 53c700_d.h
+ CVS
+ ChangeSet
++GPATH
++GRTAGS
++GSYMS
++GTAGS
+ Image
+ Kerntypes
+ Module.markers
+ Module.symvers
+ PENDING
++PERF*
+ SCCS
+ System.map*
+ TAGS
+@@ -62,6 +75,7 @@ aic7*reg_print.c*
+ aic7*seq.h*
+ aicasm
+ aicdb.h*
++ashldi3.S
+ asm-offsets.h
+ asm_offsets.h
+ autoconf.h*
+@@ -76,7 +90,11 @@ btfixupprep
+ build
+ bvmlinux
+ bzImage*
++capability_names.h
++capflags.c
+ classlist.h*
++clut_vga16.c
++common-cmds.h
+ comp*.log
+ compile.h*
+ conf
+@@ -84,6 +102,8 @@ config
+ config-*
+ config_data.h*
+ config_data.gz*
++config.c
++config.tmp
+ conmakehash
+ consolemap_deftbl.c*
+ cpustr.h
+@@ -97,19 +117,23 @@ elfconfig.h*
+ fixdep
+ fore200e_mkfirm
+ fore200e_pca_fw.c*
++gate.lds
+ gconf
+ gen-devlist
+ gen_crc32table
+ gen_init_cpio
+ genksyms
+ *_gray256.c
++hash
++hid-example
+ ihex2fw
+ ikconfig.h*
+ initramfs_data.cpio
++initramfs_data.cpio.bz2
+ initramfs_data.cpio.gz
+ initramfs_list
+ kallsyms
+-kconfig
++kern_constants.h
+ keywords.c
+ ksym.c*
+ ksym.h*
+@@ -117,6 +141,7 @@ kxgettext
+ lkc_defs.h
+ lex.c
+ lex.*.c
++lib1funcs.S
+ logo_*.c
+ logo_*_clut224.c
+ logo_*_mono.c
+@@ -127,13 +152,16 @@ machtypes.h
+ map
+ maui_boot.h
+ mconf
++mdp
+ miboot*
+ mk_elfconfig
+ mkboot
+ mkbugboot
+ mkcpustr
+ mkdep
++mkpiggy
+ mkprep
++mkregtable
+ mktables
+ mktree
+ modpost
+@@ -149,6 +177,7 @@ patches*
+ pca200e.bin
+ pca200e_ecd.bin2
+ piggy.gz
++piggy.S
+ piggyback
+ pnmtologo
+ ppc_defs.h*
+@@ -157,12 +186,16 @@ qconf
+ raid6altivec*.c
+ raid6int*.c
+ raid6tables.c
++regdb.c
+ relocs
++rlim_names.h
+ series
+ setup
+ setup.bin
+ setup.elf
++size_overflow_hash.h
+ sImage
++slabinfo
+ sm_tbl*
+ split-include
+ syscalltab.h
+@@ -171,6 +204,7 @@ tftpboot.img
+ timeconst.h
+ times.h*
+ trix_boot.h
++user_constants.h
+ utsrelease.h*
+ vdso-syms.lds
+ vdso.lds
+@@ -186,14 +220,20 @@ version.h*
+ vmlinux
+ vmlinux-*
+ vmlinux.aout
++vmlinux.bin.all
++vmlinux.bin.bz2
+ vmlinux.lds
++vmlinux.relocs
++voffset.h
+ vsyscall.lds
+ vsyscall_32.lds
+ wanxlfw.inc
+ uImage
+ unifdef
++utsrelease.h
+ wakeup.bin
+ wakeup.elf
+ wakeup.lds
+ zImage*
+ zconf.hash.c
++zoffset.h
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 14c7fb0..0f7d099 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -806,6 +806,9 @@ and is between 256 and 4096 characters. It is defined in the file
+ gpt [EFI] Forces disk with valid GPT signature but
+ invalid Protective MBR to be treated as GPT.
+
++ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
++ ignore grsecurity's /proc restrictions
++
+ gvp11= [HW,SCSI]
+
+ hashdist= [KNL,NUMA] Large hashes allocated during boot
+@@ -856,6 +859,12 @@ and is between 256 and 4096 characters. It is defined in the file
+ If specified, z/VM IUCV HVC accepts connections
+ from listed z/VM user IDs only.
+
++ keep_bootcon [KNL]
++ Do not unregister boot console at start. This is only
++ useful for debugging when something happens in the window
++ between unregistering the boot console and initializing
++ the real console.
++
+ i2c_bus= [HW] Override the default board specific I2C bus speed
+ or register an additional I2C bus that is not
+ registered from board initialization code.
+@@ -1842,6 +1851,13 @@ and is between 256 and 4096 characters. It is defined in the file
+ the specified number of seconds. This is to be used if
+ your oopses keep scrolling off the screen.
+
++ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
++ virtualization environments that don't cope well with the
++ expand down segment used by UDEREF on X86-32 or the frequent
++ page table updates on X86-64.
++
++ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
++
+ pcbit= [HW,ISDN]
+
+ pcd. [PARIDE]
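
For illustration, the parameters documented in the hunks above are ordinary kernel command-line options. A boot line using them could end with the following (the GID value and root device are arbitrary example values, not taken from the patch):

    root=/dev/sda1 ro grsec_proc_gid=1001 pax_softmode=1

Here grsec_proc_gid= names the group that is exempted from grsecurity's /proc restrictions, and pax_softmode=1 boots with PaX in soft mode as described above.
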
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index fbe427a..195ff43 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -479,6 +479,11 @@ tcp_dma_copybreak - INTEGER
+ and CONFIG_NET_DMA is enabled.
+ Default: 4096
+
++tcp_challenge_ack_limit - INTEGER
++ Limits number of Challenge ACK sent per second, as recommended
++ in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
++ Default: 100
++
+ UDP variables:
+
+ udp_mem - vector of 3 INTEGERs: min, pressure, max
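
For illustration, this tunable sits under net.ipv4 like the other TCP variables documented in this file, so it can be adjusted at runtime; the value below is only an example, not a recommendation:

    # sysctl -w net.ipv4.tcp_challenge_ack_limit=1000

It can be made persistent with a "net.ipv4.tcp_challenge_ack_limit = 1000" line in /etc/sysctl.conf.
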
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 334258c..1e8f4ff 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5725,6 +5725,14 @@ L: netdev@vger.kernel.org
+ S: Maintained
+ F: drivers/net/vmxnet3/
+
++VMware PVSCSI driver
++M: Alok Kataria <akataria@vmware.com>
++M: VMware PV-Drivers <pv-drivers@vmware.com>
++L: linux-scsi@vger.kernel.org
++S: Maintained
++F: drivers/scsi/vmw_pvscsi.c
++F: drivers/scsi/vmw_pvscsi.h
++
+ VOLTAGE AND CURRENT REGULATOR FRAMEWORK
+ M: Liam Girdwood <lrg@slimlogic.co.uk>
+ M: Mark Brown <broonie@opensource.wolfsonmicro.com>
+diff --git a/Makefile b/Makefile
+index e5a279c..2289941 100644
+--- a/Makefile
++++ b/Makefile
+@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
+-HOSTCXXFLAGS = -O2
++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
++HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
++HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
+
+ # Decide whether to build built-in, modular, or both.
+ # Normally, just do built-in.
+@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
+ # Rules shared between *config targets and build targets
+
+ # Basic helpers built in scripts/
+-PHONY += scripts_basic
+-scripts_basic:
++PHONY += scripts_basic gcc-plugins
++scripts_basic: gcc-plugins
+ $(Q)$(MAKE) $(build)=scripts/basic
+
+ # To avoid any implicit rule to kick in, define an empty command.
+@@ -403,7 +404,7 @@ endif
+ # of make so .config is not included in this case either (for *config).
+
+ no-dot-config-targets := clean mrproper distclean \
+- cscope TAGS tags help %docs check% \
++ cscope gtags TAGS tags help %docs check% \
+ include/linux/version.h headers_% \
+ kernelrelease kernelversion
+
+@@ -526,6 +527,64 @@ else
+ KBUILD_CFLAGS += -O2
+ endif
+
++ifndef DISABLE_PAX_PLUGINS
++ifeq ($(call cc-ifversion, -ge, 0408, y), y)
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
++else
++PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
++endif
++ifneq ($(PLUGINCC),)
++ifndef DISABLE_PAX_CONSTIFY_PLUGIN
++ifndef CONFIG_UML
++CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
++endif
++endif
++ifdef CONFIG_PAX_MEMORY_STACKLEAK
++STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
++STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
++endif
++ifdef CONFIG_KALLOCSTAT_PLUGIN
++KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
++endif
++ifdef CONFIG_PAX_KERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
++KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
++KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
++endif
++ifdef CONFIG_CHECKER_PLUGIN
++ifeq ($(call cc-ifversion, -ge, 0406, y), y)
++CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
++endif
++endif
++COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
++ifdef CONFIG_PAX_SIZE_OVERFLOW
++SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
++endif
++ifdef CONFIG_PAX_LATENT_ENTROPY
++LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
++endif
++GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
++GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
++GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
++export PLUGINCC CONSTIFY_PLUGIN
++ifeq ($(KBUILD_EXTMOD),)
++gcc-plugins:
++ $(Q)$(MAKE) $(build)=tools/gcc
++else
++gcc-plugins: ;
++endif
++else
++gcc-plugins:
++ifeq ($(call cc-ifversion, -ge, 0405, y), y)
++ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
++else
++ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
++endif
++ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
++endif
++endif
++
+ include $(srctree)/arch/$(SRCARCH)/Makefile
+
+ ifneq ($(CONFIG_FRAME_WARN),0)
+@@ -647,7 +706,7 @@ export mod_strip_cmd
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+@@ -868,6 +927,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
+
+ # The actual objects are generated when descending,
+ # make sure no implicit rule kicks in
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+
+ # Handle descending into subdirectories listed in $(vmlinux-dirs)
+@@ -877,7 +938,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
+ # Error messages still appears in the original language
+
+ PHONY += $(vmlinux-dirs)
+-$(vmlinux-dirs): prepare scripts
++$(vmlinux-dirs): gcc-plugins prepare scripts
+ $(Q)$(MAKE) $(build)=$@
+
+ # Build the kernel release string
+@@ -986,6 +1047,7 @@ prepare0: archprepare FORCE
+ $(Q)$(MAKE) $(build)=. missing-syscalls
+
+ # All the preparing..
++prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
+ prepare: prepare0
+
+ # The asm symlink changes when $(ARCH) changes.
+@@ -1127,6 +1189,8 @@ all: modules
+ # using awk while concatenating to the final file.
+
+ PHONY += modules
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
+ @$(kecho) ' Building modules, stage 2.';
+@@ -1136,7 +1200,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
+
+ # Target to prepare building external modules
+ PHONY += modules_prepare
+-modules_prepare: prepare scripts
++modules_prepare: gcc-plugins prepare scripts
+
+ # Target to install modules
+ PHONY += modules_install
+@@ -1199,9 +1263,9 @@ CLEAN_FILES += vmlinux System.map \
+ MRPROPER_DIRS += include/config include2 usr/include include/generated
+ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
+ include/linux/autoconf.h include/linux/version.h \
+- include/linux/utsrelease.h \
++ include/linux/utsrelease.h tools/gcc/size_overflow_hash.h \
+ include/linux/bounds.h include/asm*/asm-offsets.h \
+- Module.symvers Module.markers tags TAGS cscope*
++ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
+
+ # clean - Delete most, but leave enough to build external modules
+ #
+@@ -1245,7 +1309,7 @@ distclean: mrproper
+ @find $(srctree) $(RCS_FIND_IGNORE) \
+ \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
+ -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
+- -o -name '.*.rej' -o -size 0 \
++ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
+ -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
+ -type f -print | xargs rm -f
+
+@@ -1292,6 +1356,7 @@ help:
+ @echo ' modules_prepare - Set up for building external modules'
+ @echo ' tags/TAGS - Generate tags file for editors'
+ @echo ' cscope - Generate cscope index'
++ @echo ' gtags - Generate GNU GLOBAL index'
+ @echo ' kernelrelease - Output the release version string'
+ @echo ' kernelversion - Output the version stored in Makefile'
+ @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
+@@ -1393,6 +1458,8 @@ PHONY += $(module-dirs) modules
+ $(module-dirs): crmodverdir $(objtree)/Module.symvers
+ $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
+
++modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+ modules: $(module-dirs)
+ @$(kecho) ' Building modules, stage 2.';
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+@@ -1448,7 +1515,7 @@ endif # KBUILD_EXTMOD
+ quiet_cmd_tags = GEN $@
+ cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
+
+-tags TAGS cscope: FORCE
++tags TAGS cscope gtags: FORCE
+ $(call cmd,tags)
+
+ # Scripts to check various things for consistency
+@@ -1513,17 +1580,21 @@ else
+ target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
+ endif
+
+-%.s: %.c prepare scripts FORCE
++%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.s: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.i: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.c prepare scripts FORCE
++%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.o: %.c gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.lst: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.s: %.S prepare scripts FORCE
++%.s: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+-%.o: %.S prepare scripts FORCE
++%.o: %.S gcc-plugins prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+ %.symtypes: %.c prepare scripts FORCE
+ $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
+@@ -1533,11 +1604,15 @@ endif
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%/: prepare scripts FORCE
++%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%/: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir)
+-%.ko: prepare scripts FORCE
++%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
++%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
++%.ko: gcc-plugins prepare scripts FORCE
+ $(cmd_crmodverdir)
+ $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
+ $(build)=$(build-dir) $(@:.ko=.o)
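
The -fplugin= options wired up above load shared objects built from tools/gcc/ through GCC's plugin interface, which is why the Makefile insists on plugin-capable compiler headers. As a rough, generic sketch of the entry point every such plugin must provide (this is the stock GCC plugin API, not code taken from the patch):

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    int plugin_is_GPL_compatible;   /* GCC refuses to load a plugin lacking this symbol */

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
            /* Reject a plugin built against a different GCC release. */
            if (!plugin_default_version_check(version, &gcc_version))
                    return 1;

            /* A real plugin (constify, stackleak, size_overflow, ...) would
             * register its passes or callbacks here via register_callback(). */
            return 0;
    }
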
+diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
+index 610dff44..f396854 100644
+--- a/arch/alpha/include/asm/atomic.h
++++ b/arch/alpha/include/asm/atomic.h
+@@ -251,6 +251,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ #define atomic_dec(v) atomic_sub(1,(v))
+ #define atomic64_dec(v) atomic64_sub(1,(v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
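
On architectures with no PAX_REFCOUNT implementation, such as Alpha here, the *_unchecked variants simply alias the plain atomic operations. Their purpose is to let callers say which counters are reference counts (where overflow is a bug and traps on instrumented architectures) and which are mere statistics that may wrap. A hedged sketch of the intended calling convention, with made-up variable names:

    static atomic64_t obj_refcount = ATOMIC64_INIT(1);        /* overflow would be a bug: checked ops */
    static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0);  /* wrapping is harmless: unchecked ops  */

    static void example(void)
    {
            atomic64_inc(&obj_refcount);        /* instrumented where PAX_REFCOUNT is implemented */
            atomic64_inc_unchecked(&rx_bytes);  /* always the plain, wrap-on-overflow operation   */
    }
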
+diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
+index f199e69..af005f5 100644
+--- a/arch/alpha/include/asm/cache.h
++++ b/arch/alpha/include/asm/cache.h
+@@ -4,19 +4,20 @@
+ #ifndef __ARCH_ALPHA_CACHE_H
+ #define __ARCH_ALPHA_CACHE_H
+
++#include <linux/const.h>
+
+ /* Bytes per L1 (data) cache line. */
+ #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
+-# define L1_CACHE_BYTES 64
+ # define L1_CACHE_SHIFT 6
+ #else
+ /* Both EV4 and EV5 are write-through, read-allocate,
+ direct-mapped, physical.
+ */
+-# define L1_CACHE_BYTES 32
+ # define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
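
The switch to _AC() is what lets a single L1_CACHE_BYTES definition serve both C and assembly: the macro from <linux/const.h> appends the UL suffix only when the header is compiled as C. A minimal sketch of the effect:

    #include <linux/const.h>

    #define L1_CACHE_SHIFT 6
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    /* From C:        _AC(1,UL) expands to 1UL  ->  (1UL << 6)                   */
    /* From assembly: _AC(1,UL) expands to 1    ->  a bare constant usable in .S */
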
+diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
+index 5c75c1b..c82f878 100644
+--- a/arch/alpha/include/asm/elf.h
++++ b/arch/alpha/include/asm/elf.h
+@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
+index bc2a0da..8ad11ee 100644
+--- a/arch/alpha/include/asm/pgalloc.h
++++ b/arch/alpha/include/asm/pgalloc.h
+@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ pgd_set(pgd, pmd);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ extern pgd_t *pgd_alloc(struct mm_struct *mm);
+
+ static inline void
+diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
+index 3f0c59f..cf1e100 100644
+--- a/arch/alpha/include/asm/pgtable.h
++++ b/arch/alpha/include/asm/pgtable.h
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
+index ebc3c89..20cfa63 100644
+--- a/arch/alpha/kernel/module.c
++++ b/arch/alpha/kernel/module.c
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+- gp = (u64)me->module_core + me->core_size - 0x8000;
++ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index a94e49c..491c166 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -1163,16 +1163,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+ static unsigned long
+-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+- unsigned long limit)
++arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long limit, unsigned long flags)
+ {
+ struct vm_area_struct *vma = find_vma(current->mm, addr);
+-
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+ while (1) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+@@ -1208,20 +1208,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
++ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
+
+ return addr;
+ }
+diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
+index 00a31de..2ded0f2 100644
+--- a/arch/alpha/mm/fault.c
++++ b/arch/alpha/mm/fault.c
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long *)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++ do_group_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index b68faef..6dd1496 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -14,6 +14,7 @@ config ARM
+ select SYS_SUPPORTS_APM_EMULATION
+ select HAVE_OPROFILE
+ select HAVE_ARCH_KGDB
++ select GENERIC_ATOMIC64
+ select HAVE_KPROBES if (!XIP_KERNEL)
+ select HAVE_KRETPROBES if (HAVE_KPROBES)
+ select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
+index d0daeab..638f5e8 100644
+--- a/arch/arm/include/asm/atomic.h
++++ b/arch/arm/include/asm/atomic.h
+@@ -15,6 +15,10 @@
+ #include <linux/types.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ #ifdef __KERNEL__
+@@ -24,8 +28,16 @@
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+-#define atomic_read(v) ((v)->counter)
++#define atomic_read(v) (*(volatile int *)&(v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic_set(v,i) (((v)->counter) = (i))
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+
+ #if __LINUX_ARM_ARCH__ >= 6
+
+@@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
+ int result;
+
+ __asm__ __volatile__("@ atomic_add\n"
++"1: ldrex %1, [%2]\n"
++" add %0, %1, %3\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%2]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_add_unchecked\n"
+ "1: ldrex %0, [%2]\n"
+ " add %0, %0, %3\n"
+ " strex %1, %0, [%2]\n"
+@@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return\n"
++"1: ldrex %1, [%2]\n"
++" add %0, %1, %3\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%2]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++
++ smp_mb();
++
++ return result;
++}
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ smp_mb();
++
++ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
+ "1: ldrex %0, [%2]\n"
+ " add %0, %0, %3\n"
+ " strex %1, %0, [%2]\n"
+@@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub\n"
++"1: ldrex %1, [%2]\n"
++" sub %0, %1, %3\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
++" strex %1, %0, [%2]\n"
++" teq %1, #0\n"
++" bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
++ : "=&r" (result), "=&r" (tmp)
++ : "r" (&v->counter), "Ir" (i)
++ : "cc");
++}
++
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ unsigned long tmp;
++ int result;
++
++ __asm__ __volatile__("@ atomic_sub_unchecked\n"
+ "1: ldrex %0, [%2]\n"
+ " sub %0, %0, %3\n"
+ " strex %1, %0, [%2]\n"
+@@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_sub_return\n"
+-"1: ldrex %0, [%2]\n"
+-" sub %0, %0, %3\n"
++"1: ldrex %1, [%2]\n"
++" subs %0, %1, %3\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" bvc 3f\n"
++" mov %0, %1\n"
++"2: bkpt 0xf103\n"
++"3:\n"
++#endif
++
+ " strex %1, %0, [%2]\n"
+ " teq %1, #0\n"
+ " bne 1b"
++
++#ifdef CONFIG_PAX_REFCOUNT
++"\n4:\n"
++ _ASM_EXTABLE(2b, 4b)
++#endif
++
+ : "=&r" (result), "=&r" (tmp)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+@@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+ return oldval;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
++{
++ unsigned long oldval, res;
++
++ smp_mb();
++
++ do {
++ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
++ "ldrex %1, [%2]\n"
++ "mov %0, #0\n"
++ "teq %1, %3\n"
++ "strexeq %0, %4, [%2]\n"
++ : "=&r" (res), "=&r" (oldval)
++ : "r" (&ptr->counter), "Ir" (old), "r" (new)
++ : "cc");
++ } while (res);
++
++ smp_mb();
++
++ return oldval;
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long tmp, tmp2;
+@@ -165,7 +307,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
+
+ return val;
+ }
++
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_return(i, v);
++}
++
+ #define atomic_add(i, v) (void) atomic_add_return(i, v)
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_add_return(i, v);
++}
+
+ static inline int atomic_sub_return(int i, atomic_t *v)
+ {
+@@ -180,6 +332,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ return val;
+ }
+ #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ (void) atomic_sub_return(i, v);
++}
+
+ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+@@ -195,6 +351,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return ret;
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return atomic_cmpxchg(v, old, new);
++}
++
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ {
+ unsigned long flags;
+@@ -207,6 +368,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+ #endif /* __LINUX_ARM_ARCH__ */
+
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+@@ -220,11 +385,27 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+
+ #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v) == 0;
++}
+ #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+ #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
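
The bvc/bkpt sequences added above give the checked ARM atomics their PAX_REFCOUNT behaviour: the arithmetic is done with the overflow flag live, and on signed overflow the store is skipped and a breakpoint (0xf103) is raised so the fault handler can report the event instead of letting the counter wrap. In C terms the checked add behaves roughly like the sketch below (the builtin and the trap_and_report() helper are stand-ins for illustration only; the real implementation is the ldrex/strex assembly above):

    static inline void refcount_style_add(int i, atomic_t *v)
    {
            int old = v->counter;
            int new;

            if (__builtin_add_overflow(old, i, &new))
                    trap_and_report();   /* hypothetical: models the bkpt + PaX report path */
            else
                    v->counter = new;    /* the real code does this atomically via ldrex/strex */
    }
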
+diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
+index 66c160b..bca1449 100644
+--- a/arch/arm/include/asm/cache.h
++++ b/arch/arm/include/asm/cache.h
+@@ -4,8 +4,10 @@
+ #ifndef __ASMARM_CACHE_H
+ #define __ASMARM_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
+index 3d0cdd2..19957c5 100644
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -216,13 +216,13 @@ struct cpu_cache_fns {
+ void (*dma_inv_range)(const void *, const void *);
+ void (*dma_clean_range)(const void *, const void *);
+ void (*dma_flush_range)(const void *, const void *);
+-};
++} __no_const;
+
+ struct outer_cache_fns {
+ void (*inv_range)(unsigned long, unsigned long);
+ void (*clean_range)(unsigned long, unsigned long);
+ void (*flush_range)(unsigned long, unsigned long);
+-};
++} __no_const;
+
+ /*
+ * Select the calling method
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index 6aac3f5..265536b 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+
+ /* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
+index c019949..388fdd1 100644
+--- a/arch/arm/include/asm/kmap_types.h
++++ b/arch/arm/include/asm/kmap_types.h
+@@ -19,6 +19,7 @@ enum km_type {
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
+ KM_L2_CACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
+index 3a32af4..c8def8a 100644
+--- a/arch/arm/include/asm/page.h
++++ b/arch/arm/include/asm/page.h
+@@ -122,7 +122,7 @@ struct cpu_user_fns {
+ void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+ void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+ unsigned long vaddr);
+-};
++} __no_const;
+
+ #ifdef MULTI_USER
+ extern struct cpu_user_fns cpu_user;
+diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
+index 1df6457..c806a73 100644
+--- a/arch/arm/include/asm/ptrace.h
++++ b/arch/arm/include/asm/ptrace.h
+@@ -69,7 +69,7 @@
+ /*
+ * ARMv7 groups of APSR bits
+ */
+-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
++#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
+ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
+
+diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
+index d65b2f5..9d87555 100644
+--- a/arch/arm/include/asm/system.h
++++ b/arch/arm/include/asm/system.h
+@@ -86,6 +86,8 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+
+ #define xchg(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
++#define xchg_unchecked(ptr,x) \
++ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+
+ extern asmlinkage void __backtrace(void);
+ extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+@@ -98,7 +100,7 @@ extern int cpu_architecture(void);
+ extern void cpu_init(void);
+
+ void arm_machine_restart(char mode, const char *cmd);
+-extern void (*arm_pm_restart)(char str, const char *cmd);
++extern void (*arm_pm_restart)(char str, const char *cmd) __noreturn;
+
+ #define UDBG_UNDEFINED (1 << 0)
+ #define UDBG_SYSCALL (1 << 1)
+@@ -505,6 +507,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+
+ #endif /* __LINUX_ARM_ARCH__ >= 6 */
+
++#define _ASM_EXTABLE(from, to) \
++" .pushsection __ex_table,\"a\"\n"\
++" .align 3\n" \
++" .long " #from ", " #to"\n" \
++" .popsection"
++
++
+ #endif /* __ASSEMBLY__ */
+
+ #define arch_align_stack(x) (x)
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 2dfb7d7..8fadd73 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -138,6 +138,12 @@ extern void vfp_sync_state(struct thread_info *thread);
+ #define TIF_NEED_RESCHED 1
+ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+ #define TIF_SYSCALL_TRACE 8
++
++/* within 8 bits of TIF_SYSCALL_TRACE
++ to meet flexible second operand requirements
++*/
++#define TIF_GRSEC_SETXID 9
++
+ #define TIF_POLLING_NRFLAG 16
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18
+@@ -152,6 +158,10 @@ extern void vfp_sync_state(struct thread_info *thread);
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+ #define _TIF_FREEZE (1 << TIF_FREEZE)
+ #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
++
++/* Checks for any syscall work in entry-common.S */
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_GRSEC_SETXID)
+
+ /*
+ * Change these and you break ASM code in entry-common.S
+diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
+index 1d6bd40..159316f 100644
+--- a/arch/arm/include/asm/uaccess.h
++++ b/arch/arm/include/asm/uaccess.h
+@@ -387,8 +387,23 @@ do { \
+
+
+ #ifdef CONFIG_MMU
+-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
+-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
++extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
++extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
++
++static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return ___copy_from_user(to, from, n);
++}
++
++static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return ___copy_to_user(to, from, n);
++}
++
+ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
+ extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+@@ -403,6 +418,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
+
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else /* security hole - plug it */
+@@ -412,6 +430,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
+
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
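
The (long)n < 0 guard added to copy_from_user()/copy_to_user() targets a recurring bug pattern: a signed length computation underflows, and the negative result is then implicitly converted to a huge unsigned count. A hedged illustration of the failure mode being blocked (all names below are invented for the example):

    static long example_read(void __user *ubuf, const void *kbuf,
                             size_t offset, size_t total)
    {
            long len = total - offset;   /* offset > total  =>  len goes negative */

            /* Without the guard, copy_to_user() would see (size_t)len, an enormous
             * value.  With it, the negative count is handed straight back as the
             * "bytes not copied" return value before any copying is attempted. */
            return copy_to_user(ubuf, kbuf, len);
    }
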
+diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
+index 0e62770..e2c2cd6 100644
+--- a/arch/arm/kernel/armksyms.c
++++ b/arch/arm/kernel/armksyms.c
+@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
+ #ifdef CONFIG_MMU
+ EXPORT_SYMBOL(copy_page);
+
+-EXPORT_SYMBOL(__copy_from_user);
+-EXPORT_SYMBOL(__copy_to_user);
++EXPORT_SYMBOL(___copy_from_user);
++EXPORT_SYMBOL(___copy_to_user);
+ EXPORT_SYMBOL(__clear_user);
+
+ EXPORT_SYMBOL(__get_user_1);
+diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
+index a6c66f5..bfdad39 100644
+--- a/arch/arm/kernel/entry-common.S
++++ b/arch/arm/kernel/entry-common.S
+@@ -77,7 +77,7 @@ ENTRY(ret_from_fork)
+ get_thread_info tsk
+ ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
+ mov why, #1
+- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
++ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ beq ret_slow_syscall
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+@@ -275,7 +275,7 @@ ENTRY(vector_swi)
+ #endif
+
+ stmdb sp!, {r4, r5} @ push fifth and sixth args
+- tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
++ tst ip, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
+ bne __sys_trace
+
+ cmp scno, #NR_syscalls @ check upper syscall limit
+diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
+index 38ccbe1..ca979b0 100644
+--- a/arch/arm/kernel/head.S
++++ b/arch/arm/kernel/head.S
+@@ -45,7 +45,9 @@
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
+
+ .macro pgtbl, rd
+- ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
++ mov \rd, #KERNEL_RAM_PADDR
++ sub \rd, #0x4000
++ add \rd, \rd, \phys
+ .endm
+
+ #ifdef CONFIG_XIP_KERNEL
+diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
+index ba8ccfe..2dc34dc 100644
+--- a/arch/arm/kernel/kgdb.c
++++ b/arch/arm/kernel/kgdb.c
+@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
+ * and we handle the normal undef case within the do_undefinstr
+ * handler.
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifndef __ARMEB__
+ .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
+ #else /* ! __ARMEB__ */
+diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
+index 61f90d3..b1b8ab9 100644
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -83,7 +83,7 @@ static int __init hlt_setup(char *__unused)
+ __setup("nohlt", nohlt_setup);
+ __setup("hlt", hlt_setup);
+
+-void arm_machine_restart(char mode, const char *cmd)
++__noreturn void arm_machine_restart(char mode, const char *cmd)
+ {
+ /*
+ * Clean and disable cache, and turn off interrupts
+@@ -117,7 +117,7 @@ void arm_machine_restart(char mode, const char *cmd)
+ void (*pm_power_off)(void);
+ EXPORT_SYMBOL(pm_power_off);
+
+-void (*arm_pm_restart)(char str, const char *cmd) = arm_machine_restart;
++void (*arm_pm_restart)(char str, const char *cmd) __noreturn = arm_machine_restart;
+ EXPORT_SYMBOL_GPL(arm_pm_restart);
+
+
+@@ -195,6 +195,7 @@ __setup("reboot=", reboot_setup);
+
+ void machine_halt(void)
+ {
++ BUG();
+ }
+
+
+@@ -202,6 +203,7 @@ void machine_power_off(void)
+ {
+ if (pm_power_off)
+ pm_power_off();
++ BUG();
+ }
+
+ void machine_restart(char *cmd)
+@@ -218,8 +220,8 @@ void __show_regs(struct pt_regs *regs)
+ smp_processor_id(), print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+- print_symbol("PC is at %s\n", instruction_pointer(regs));
+- print_symbol("LR is at %s\n", regs->ARM_lr);
++ printk("PC is at %pA\n", instruction_pointer(regs));
++ printk("LR is at %pA\n", regs->ARM_lr);
+ printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index a2ea385..4783488 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -847,10 +847,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+ {
+ unsigned long ip;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
+index c6c57b6..8ec5c3f 100644
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -92,16 +92,16 @@ EXPORT_SYMBOL(elf_hwcap);
+ struct processor processor;
+ #endif
+ #ifdef MULTI_TLB
+-struct cpu_tlb_fns cpu_tlb;
++struct cpu_tlb_fns cpu_tlb __read_mostly;
+ #endif
+ #ifdef MULTI_USER
+-struct cpu_user_fns cpu_user;
++struct cpu_user_fns cpu_user __read_mostly;
+ #endif
+ #ifdef MULTI_CACHE
+-struct cpu_cache_fns cpu_cache;
++struct cpu_cache_fns cpu_cache __read_mostly;
+ #endif
+ #ifdef CONFIG_OUTER_CACHE
+-struct outer_cache_fns outer_cache;
++struct outer_cache_fns outer_cache __read_mostly;
+ #endif
+
+ struct stack {
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 3f361a7..aa0d108 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -50,10 +50,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
+ #ifdef CONFIG_KALLSYMS
+- char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
+- sprint_symbol(sym1, where);
+- sprint_symbol(sym2, from);
+- printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
++ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+@@ -247,6 +244,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
+
+ DEFINE_SPINLOCK(die_lock);
+
++extern void gr_handle_kernel_exploit(void);
++
+ /*
+ * This function is protected against re-entrancy.
+ */
+@@ -271,6 +270,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
++ gr_handle_kernel_exploit();
++
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
+index aecf87df..bed731b 100644
+--- a/arch/arm/kernel/vmlinux.lds.S
++++ b/arch/arm/kernel/vmlinux.lds.S
+@@ -74,14 +74,18 @@ SECTIONS
+ #ifndef CONFIG_XIP_KERNEL
+ __init_begin = _stext;
+ INIT_DATA
++ EXIT_TEXT
++ EXIT_DATA
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+ #endif
+ }
+
+ /DISCARD/ : { /* Exit code and data */
++#ifdef CONFIG_XIP_KERNEL
+ EXIT_TEXT
+ EXIT_DATA
++#endif
+ *(.exitcall.exit)
+ *(.discard)
+ *(.ARM.exidx.exit.text)
+diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
+index e4fe124..0fc246b 100644
+--- a/arch/arm/lib/copy_from_user.S
++++ b/arch/arm/lib/copy_from_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_from_user(void *to, const void *from, size_t n)
++ * size_t ___copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -84,11 +84,11 @@
+
+ .text
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .section .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
+index 6ee2f67..d1cce76 100644
+--- a/arch/arm/lib/copy_page.S
++++ b/arch/arm/lib/copy_page.S
+@@ -10,6 +10,7 @@
+ * ASM optimised string functions
+ */
+ #include <linux/linkage.h>
++#include <linux/const.h>
+ #include <asm/assembler.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cache.h>
+diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
+index 1a71e15..ac7b258 100644
+--- a/arch/arm/lib/copy_to_user.S
++++ b/arch/arm/lib/copy_to_user.S
+@@ -16,7 +16,7 @@
+ /*
+ * Prototype:
+ *
+- * size_t __copy_to_user(void *to, const void *from, size_t n)
++ * size_t ___copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+@@ -88,11 +88,11 @@
+ .text
+
+ ENTRY(__copy_to_user_std)
+-WEAK(__copy_to_user)
++WEAK(___copy_to_user)
+
+ #include "copy_template.S"
+
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .section .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
+index ffdd274..91017b6 100644
+--- a/arch/arm/lib/uaccess.S
++++ b/arch/arm/lib/uaccess.S
+@@ -19,7 +19,7 @@
+
+ #define PAGE_SHIFT 12
+
+-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
++/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
+ sub r2, r2, ip
+ b .Lc2u_dest_aligned
+
+-ENTRY(__copy_to_user)
++ENTRY(___copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lc2u_not_enough
+@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
+ ldrgtb r3, [r1], #0
+ USER( strgtbt r3, [r0], #1) @ May fault
+ b .Lc2u_finished
+-ENDPROC(__copy_to_user)
++ENDPROC(___copy_to_user)
+
+ .section .fixup,"ax"
+ .align 0
+ 9001: ldmfd sp!, {r0, r4 - r7, pc}
+ .previous
+
+-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
++/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
+ sub r2, r2, ip
+ b .Lcfu_dest_aligned
+
+-ENTRY(__copy_from_user)
++ENTRY(___copy_from_user)
+ stmfd sp!, {r0, r2, r4 - r7, lr}
+ cmp r2, #4
+ blt .Lcfu_not_enough
+@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
+ USER( ldrgtbt r3, [r1], #1) @ May fault
+ strgtb r3, [r0], #1
+ b .Lcfu_finished
+-ENDPROC(__copy_from_user)
++ENDPROC(___copy_from_user)
+
+ .section .fixup,"ax"
+ .align 0
+diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
+index 6b967ff..67d5b2b 100644
+--- a/arch/arm/lib/uaccess_with_memcpy.c
++++ b/arch/arm/lib/uaccess_with_memcpy.c
+@@ -97,7 +97,7 @@ out:
+ }
+
+ unsigned long
+-__copy_to_user(void __user *to, const void *from, unsigned long n)
++___copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ /*
+ * This test is stubbed out of the main function above to keep
+diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
+index 4028724..beec230 100644
+--- a/arch/arm/mach-at91/pm.c
++++ b/arch/arm/mach-at91/pm.c
+@@ -348,7 +348,7 @@ static void at91_pm_end(void)
+ }
+
+
+-static struct platform_suspend_ops at91_pm_ops ={
++static const struct platform_suspend_ops at91_pm_ops ={
+ .valid = at91_pm_valid_state,
+ .begin = at91_pm_begin,
+ .enter = at91_pm_enter,
+diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
+index 5218943..0a34552 100644
+--- a/arch/arm/mach-omap1/pm.c
++++ b/arch/arm/mach-omap1/pm.c
+@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
+
+
+
+-static struct platform_suspend_ops omap_pm_ops ={
++static const struct platform_suspend_ops omap_pm_ops ={
+ .prepare = omap_pm_prepare,
+ .enter = omap_pm_enter,
+ .finish = omap_pm_finish,
+diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
+index bff5c4e..d4c649b 100644
+--- a/arch/arm/mach-omap2/pm24xx.c
++++ b/arch/arm/mach-omap2/pm24xx.c
+@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
+ enable_hlt();
+ }
+
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ .prepare = omap2_pm_prepare,
+ .enter = omap2_pm_enter,
+ .finish = omap2_pm_finish,
+diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
+index 8946319..7d3e661 100644
+--- a/arch/arm/mach-omap2/pm34xx.c
++++ b/arch/arm/mach-omap2/pm34xx.c
+@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
+ return;
+ }
+
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap3_pm_begin,
+ .end = omap3_pm_end,
+ .prepare = omap3_pm_prepare,
+diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
+index b3d8d53..6e68ebc 100644
+--- a/arch/arm/mach-pnx4008/pm.c
++++ b/arch/arm/mach-pnx4008/pm.c
+@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
+ (state == PM_SUSPEND_MEM);
+ }
+
+-static struct platform_suspend_ops pnx4008_pm_ops = {
++static const struct platform_suspend_ops pnx4008_pm_ops = {
+ .enter = pnx4008_pm_enter,
+ .valid = pnx4008_pm_valid,
+ };
+diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
+index 7693355..9beb00a 100644
+--- a/arch/arm/mach-pxa/pm.c
++++ b/arch/arm/mach-pxa/pm.c
+@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
+ pxa_cpu_pm_fns->finish();
+ }
+
+-static struct platform_suspend_ops pxa_pm_ops = {
++static const struct platform_suspend_ops pxa_pm_ops = {
+ .valid = pxa_pm_valid,
+ .enter = pxa_pm_enter,
+ .prepare = pxa_pm_prepare,
+diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
+index 629e05d..06be589 100644
+--- a/arch/arm/mach-pxa/sharpsl_pm.c
++++ b/arch/arm/mach-pxa/sharpsl_pm.c
+@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
+ }
+
+ #ifdef CONFIG_PM
+-static struct platform_suspend_ops sharpsl_pm_ops = {
++static const struct platform_suspend_ops sharpsl_pm_ops = {
+ .prepare = pxa_pm_prepare,
+ .finish = pxa_pm_finish,
+ .enter = corgi_pxa_pm_enter,
+diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
+index c83fdc8..ab9fc44 100644
+--- a/arch/arm/mach-sa1100/pm.c
++++ b/arch/arm/mach-sa1100/pm.c
+@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
+ return virt_to_phys(sp);
+ }
+
+-static struct platform_suspend_ops sa11x0_pm_ops = {
++static const struct platform_suspend_ops sa11x0_pm_ops = {
+ .enter = sa11x0_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 3191cd6..68bd2d7 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -569,6 +569,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
+ const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (fsr_fs(ifsr) == 2) {
++ unsigned int bkpt;
++
++ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
++ current->thread.error_code = ifsr;
++ current->thread.trap_no = 0;
++ pax_report_refcount_overflow(regs);
++ fixup_exception(regs);
++ return;
++ }
++ }
++#endif
++
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
+diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
+index f5abc51..aac904f 100644
+--- a/arch/arm/mm/mmap.c
++++ b/arch/arm/mm/mmap.c
+@@ -30,6 +30,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+ #ifdef CONFIG_CPU_V6
+ unsigned int cache_type;
+ int do_align = 0, aliasing = 0;
+@@ -63,6 +64,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -70,15 +75,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
+- mm->cached_hole_size = 0;
++ start_addr = addr = mm->mmap_base;
++ mm->cached_hole_size = 0;
+ }
+
+ full_search:
+@@ -94,14 +98,14 @@ full_search:
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -116,7 +120,6 @@ full_search:
+ }
+ }
+
+-
+ /*
+ * You really shouldn't be using read() or write() on /dev/mem. This
+ * might go away in the future.
+diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
+index 8d97db2..b66cfa5 100644
+--- a/arch/arm/plat-s3c/pm.c
++++ b/arch/arm/plat-s3c/pm.c
+@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
+ s3c_pm_check_cleanup();
+ }
+
+-static struct platform_suspend_ops s3c_pm_ops = {
++static const struct platform_suspend_ops s3c_pm_ops = {
+ .enter = s3c_pm_enter,
+ .prepare = s3c_pm_prepare,
+ .finish = s3c_pm_finish,
+diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
+index d3cf35a..0ba6053 100644
+--- a/arch/avr32/include/asm/cache.h
++++ b/arch/avr32/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ASM_AVR32_CACHE_H
+ #define __ASM_AVR32_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
+index d5d1d41..856e2ed 100644
+--- a/arch/avr32/include/asm/elf.h
++++ b/arch/avr32/include/asm/elf.h
+@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x00001000UL
++
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
+index b7f5c68..556135c 100644
+--- a/arch/avr32/include/asm/kmap_types.h
++++ b/arch/avr32/include/asm/kmap_types.h
+@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
+ D(11) KM_IRQ1,
+ D(12) KM_SOFTIRQ0,
+ D(13) KM_SOFTIRQ1,
+-D(14) KM_TYPE_NR
++D(14) KM_CLEARPAGE,
++D(15) KM_TYPE_NR
+ };
+
+ #undef D
+diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
+index f021edf..32d680e 100644
+--- a/arch/avr32/mach-at32ap/pm.c
++++ b/arch/avr32/mach-at32ap/pm.c
+@@ -176,7 +176,7 @@ out:
+ return 0;
+ }
+
+-static struct platform_suspend_ops avr32_pm_ops = {
++static const struct platform_suspend_ops avr32_pm_ops = {
+ .valid = avr32_pm_valid_state,
+ .enter = avr32_pm_enter,
+ };
+diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
+index b61d86d..e292c7f 100644
+--- a/arch/avr32/mm/fault.c
++++ b/arch/avr32/mm/fault.c
+@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
+
+ int exception_trace = 1;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+@@ -157,6 +174,16 @@ bad_area:
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ if (exception_trace && printk_ratelimit())
+ printk("%s%s[%d]: segfault at %08lx pc %08lx "
+ "sp %08lx ecr %lu\n",
+diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
+index 93f6c63..d144953 100644
+--- a/arch/blackfin/include/asm/cache.h
++++ b/arch/blackfin/include/asm/cache.h
+@@ -7,12 +7,14 @@
+ #ifndef __ARCH_BLACKFIN_CACHE_H
+ #define __ARCH_BLACKFIN_CACHE_H
+
++#include <linux/const.h>
++
+ /*
+ * Bytes per L1 cache line
+ * Blackfin loads 32 bytes for cache
+ */
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+ #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
+index cce79d0..c406c85 100644
+--- a/arch/blackfin/kernel/kgdb.c
++++ b/arch/blackfin/kernel/kgdb.c
+@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
+ return -1; /* this means that we do not want to exit from the handler */
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0xa1},
+ #ifdef CONFIG_SMP
+ .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
+diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
+index 8837be4..b2fb413 100644
+--- a/arch/blackfin/mach-common/pm.c
++++ b/arch/blackfin/mach-common/pm.c
+@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
+ return 0;
+ }
+
+-struct platform_suspend_ops bfin_pm_ops = {
++const struct platform_suspend_ops bfin_pm_ops = {
+ .enter = bfin_pm_enter,
+ .valid = bfin_pm_valid,
+ };
+diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
+index aea2718..3639a60 100644
+--- a/arch/cris/include/arch-v10/arch/cache.h
++++ b/arch/cris/include/arch-v10/arch/cache.h
+@@ -1,8 +1,9 @@
+ #ifndef _ASM_ARCH_CACHE_H
+ #define _ASM_ARCH_CACHE_H
+
++#include <linux/const.h>
+ /* Etrax 100LX have 32-byte cache-lines. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_ARCH_CACHE_H */
+diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
+index dfc7305..417f5b3 100644
+--- a/arch/cris/include/arch-v32/arch/cache.h
++++ b/arch/cris/include/arch-v32/arch/cache.h
+@@ -1,11 +1,12 @@
+ #ifndef _ASM_CRIS_ARCH_CACHE_H
+ #define _ASM_CRIS_ARCH_CACHE_H
+
++#include <linux/const.h>
+ #include <arch/hwregs/dma.h>
+
+ /* A cache-line is 32 bytes. */
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ void flush_dma_list(dma_descr_data *descr);
+ void flush_dma_descr(dma_descr_data *descr, int flush_buf);
+diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
+index 00a57af..c3ef0cd 100644
+--- a/arch/frv/include/asm/atomic.h
++++ b/arch/frv/include/asm/atomic.h
+@@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
+ #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
+ #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+ int c, old;
+diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
+index 7dc0f0f..1e6a620 100644
+--- a/arch/frv/include/asm/cache.h
++++ b/arch/frv/include/asm/cache.h
+@@ -12,10 +12,11 @@
+ #ifndef __ASM_CACHE_H
+ #define __ASM_CACHE_H
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
+index f8e16b2..c73ff79 100644
+--- a/arch/frv/include/asm/kmap_types.h
++++ b/arch/frv/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
+index 385fd30..27cf8ba 100644
+--- a/arch/frv/mm/elf-fdpic.c
++++ b/arch/frv/mm/elf-fdpic.c
+@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ {
+ struct vm_area_struct *vma;
+ unsigned long limit;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ }
+
+@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ addr = vma->vm_end;
+ }
+@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ for (; vma; vma = vma->vm_next) {
+ if (addr > limit)
+ break;
+- if (addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ goto success;
+ addr = vma->vm_end;
+ }
+diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
+index c635028..6d9445a 100644
+--- a/arch/h8300/include/asm/cache.h
++++ b/arch/h8300/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef __ARCH_H8300_CACHE_H
+ #define __ARCH_H8300_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+-#define L1_CACHE_BYTES 4
++#define L1_CACHE_BYTES _AC(4,UL)
+
+ /* m68k-elf-gcc 2.95.2 doesn't like these */
+
+diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
+index e4a80d8..11a7ea1 100644
+--- a/arch/ia64/hp/common/hwsw_iommu.c
++++ b/arch/ia64/hp/common/hwsw_iommu.c
+@@ -17,7 +17,7 @@
+ #include <linux/swiotlb.h>
+ #include <asm/machvec.h>
+
+-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
++extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+
+ /* swiotlb declarations & definitions: */
+ extern int swiotlb_late_init_with_default_size (size_t size);
+@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
+ !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
+ }
+
+-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
++const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+ {
+ if (use_swiotlb(dev))
+ return &swiotlb_dma_ops;
+diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
+index 01ae69b..35752fd 100644
+--- a/arch/ia64/hp/common/sba_iommu.c
++++ b/arch/ia64/hp/common/sba_iommu.c
+@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
+ },
+ };
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static int __init
+ sba_init(void)
+@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
+
+ __setup("sbapagesize=",sba_page_override);
+
+-struct dma_map_ops sba_dma_ops = {
++const struct dma_map_ops sba_dma_ops = {
+ .alloc_coherent = sba_alloc_coherent,
+ .free_coherent = sba_free_coherent,
+ .map_page = sba_map_page,
+diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
+index c69552b..c7122f4 100644
+--- a/arch/ia64/ia32/binfmt_elf32.c
++++ b/arch/ia64/ia32/binfmt_elf32.c
+@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
+
+ #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
+index 0f15349..26b3429 100644
+--- a/arch/ia64/ia32/ia32priv.h
++++ b/arch/ia64/ia32/ia32priv.h
+@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
+ #define ELF_DATA ELFDATA2LSB
+ #define ELF_ARCH EM_386
+
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
++
+ #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
+ #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
+
+diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
+index 88405cb..de5ca5d 100644
+--- a/arch/ia64/include/asm/atomic.h
++++ b/arch/ia64/include/asm/atomic.h
+@@ -210,6 +210,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
+ #define atomic64_inc(v) atomic64_add(1, (v))
+ #define atomic64_dec(v) atomic64_sub(1, (v))
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ /* Atomic operations are already serializing */
+ #define smp_mb__before_atomic_dec() barrier()
+ #define smp_mb__after_atomic_dec() barrier()
+diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
+index e7482bd..d1c9b8e 100644
+--- a/arch/ia64/include/asm/cache.h
++++ b/arch/ia64/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef _ASM_IA64_CACHE_H
+ #define _ASM_IA64_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+@@ -9,7 +10,7 @@
+
+ /* Bytes per L1 (data) cache line. */
+ #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #ifdef CONFIG_SMP
+ # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
+index 8d3c79c..71b3af6 100644
+--- a/arch/ia64/include/asm/dma-mapping.h
++++ b/arch/ia64/include/asm/dma-mapping.h
+@@ -12,7 +12,7 @@
+
+ #define ARCH_HAS_DMA_GET_REQUIRED_MASK
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ extern struct ia64_machine_vector ia64_mv;
+ extern void set_iommu_machvec(void);
+
+@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *daddr, gfp_t gfp)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ void *caddr;
+
+ caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *caddr, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ debug_dma_free_coherent(dev, size, caddr, daddr);
+ ops->free_coherent(dev, size, caddr, daddr);
+ }
+@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->mapping_error(dev, daddr);
+ }
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *ops = platform_dma_get_ops(dev);
++ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
+ return ops->dma_supported(dev, mask);
+ }
+
+diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
+index 86eddee..b116bb4 100644
+--- a/arch/ia64/include/asm/elf.h
++++ b/arch/ia64/include/asm/elf.h
+@@ -43,6 +43,13 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
++#endif
++
+ #define PT_IA_64_UNWIND 0x70000001
+
+ /* IA-64 relocations: */
+diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
+index 367d299..9ad4279 100644
+--- a/arch/ia64/include/asm/machvec.h
++++ b/arch/ia64/include/asm/machvec.h
+@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
+ /* DMA-mapping interface: */
+ typedef void ia64_mv_dma_init (void);
+ typedef u64 ia64_mv_dma_get_required_mask (struct device *);
+-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
++typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+
+ /*
+ * WARNING: The legacy I/O space is _architected_. Platforms are
+@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
+ # endif /* CONFIG_IA64_GENERIC */
+
+ extern void swiotlb_dma_init(void);
+-extern struct dma_map_ops *dma_get_ops(struct device *);
++extern const struct dma_map_ops *dma_get_ops(struct device *);
+
+ /*
+ * Define default versions so we can extend machvec for new platforms without having
+diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
+index 96a8d92..617a1cf 100644
+--- a/arch/ia64/include/asm/pgalloc.h
++++ b/arch/ia64/include/asm/pgalloc.h
+@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+ pgd_val(*pgd_entry) = __pa(pud);
+ }
+
++static inline void
++pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
++{
++ pgd_populate(mm, pgd_entry, pud);
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+ pud_val(*pud_entry) = __pa(pmd);
+ }
+
++static inline void
++pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
++{
++ pud_populate(mm, pud_entry, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return quicklist_alloc(0, GFP_KERNEL, NULL);
+diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
+index 8840a69..cdb63d9 100644
+--- a/arch/ia64/include/asm/pgtable.h
++++ b/arch/ia64/include/asm/pgtable.h
+@@ -12,7 +12,7 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
++#include <linux/const.h>
+ #include <asm/mman.h>
+ #include <asm/page.h>
+ #include <asm/processor.h>
+@@ -143,6 +143,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
+index 3eaeedf..0530962 100644
+--- a/arch/ia64/include/asm/processor.h
++++ b/arch/ia64/include/asm/processor.h
+@@ -361,7 +361,7 @@ struct thread_struct {
+ regs->loadrs = 0; \
+ regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
+ regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
+- if (unlikely(!get_dumpable(current->mm))) { \
++ if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
+ /* \
+ * Zap scratch regs to avoid leaking bits between processes with different \
+ * uid/privileges. \
+diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
+index 239ecdc..f94170e 100644
+--- a/arch/ia64/include/asm/spinlock.h
++++ b/arch/ia64/include/asm/spinlock.h
+@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
+
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
++ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
+ }
+
+ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 449c8c0..18965fb 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
+ static inline unsigned long
+ __copy_to_user (void __user *to, const void *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(from, count, true);
++
+ return __copy_user(to, (__force void __user *) from, count);
+ }
+
+ static inline unsigned long
+ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ {
++ if (count > INT_MAX)
++ return count;
++
++ if (!__builtin_constant_p(count))
++ check_object_size(to, count, false);
++
+ return __copy_user((__force void __user *) to, from, count);
+ }
+
+@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void __user *__cu_to = (to); \
+ const void *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+- if (__access_ok(__cu_to, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_from, __cu_len, true); \
+ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ ({ \
+ void *__cu_to = (to); \
+ const void __user *__cu_from = (from); \
+- long __cu_len = (n); \
++ unsigned long __cu_len = (n); \
+ \
+ __chk_user_ptr(__cu_from); \
+- if (__access_ok(__cu_from, __cu_len, get_fs())) \
++ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
++ if (!__builtin_constant_p(n)) \
++ check_object_size(__cu_to, __cu_len, false); \
+ __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
++ } \
+ __cu_len; \
+ })
+
+diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
+index f2c1600..969398a 100644
+--- a/arch/ia64/kernel/dma-mapping.c
++++ b/arch/ia64/kernel/dma-mapping.c
+@@ -3,7 +3,7 @@
+ /* Set this to 1 if there is a HW IOMMU in the system */
+ int iommu_detected __read_mostly;
+
+-struct dma_map_ops *dma_ops;
++const struct dma_map_ops *dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+@@ -16,7 +16,7 @@ static int __init dma_init(void)
+ }
+ fs_initcall(dma_init);
+
+-struct dma_map_ops *dma_get_ops(struct device *dev)
++const struct dma_map_ops *dma_get_ops(struct device *dev)
+ {
+ return dma_ops;
+ }
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 1481b0a..e7d38ff 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
+ void
+ module_free (struct module *mod, void *module_region)
+ {
+- if (mod && mod->arch.init_unw_table &&
+- module_region == mod->module_init) {
++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
+ unw_remove_unwind_table(mod->arch.init_unw_table);
+ mod->arch.init_unw_table = NULL;
+ }
+@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
+ }
+
+ static inline int
++in_init_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
++}
++
++static inline int
++in_init_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
++}
++
++static inline int
+ in_init (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_init < mod->init_size;
++ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
++}
++
++static inline int
++in_core_rx (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
++}
++
++static inline int
++in_core_rw (const struct module *mod, uint64_t addr)
++{
++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
+ }
+
+ static inline int
+ in_core (const struct module *mod, uint64_t addr)
+ {
+- return addr - (uint64_t) mod->module_core < mod->core_size;
++ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
+ }
+
+ static inline int
+@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
+ break;
+
+ case RV_BDREL:
+- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
++ if (in_init_rx(mod, val))
++ val -= (uint64_t) mod->module_init_rx;
++ else if (in_init_rw(mod, val))
++ val -= (uint64_t) mod->module_init_rw;
++ else if (in_core_rx(mod, val))
++ val -= (uint64_t) mod->module_core_rx;
++ else if (in_core_rw(mod, val))
++ val -= (uint64_t) mod->module_core_rw;
+ break;
+
+ case RV_LTV:
+@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
+ * addresses have been selected...
+ */
+ uint64_t gp;
+- if (mod->core_size > MAX_LTOFF)
++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
+ /*
+ * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
+ * at the end of the module.
+ */
+- gp = mod->core_size - MAX_LTOFF / 2;
++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
+ else
+- gp = mod->core_size / 2;
+- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
++ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
+ mod->arch.gp = gp;
+ DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
+ }
+diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
+index fdf6f9d..dae10ed 100644
+--- a/arch/ia64/kernel/palinfo.c
++++ b/arch/ia64/kernel/palinfo.c
+@@ -977,7 +977,7 @@ create_palinfo_proc_entries(unsigned int cpu)
+ struct proc_dir_entry **pdir;
+ struct proc_dir_entry *cpu_dir;
+ int j;
+- char cpustr[sizeof(CPUSTR)];
++ char cpustr[3+4+1];
+
+
+ /*
+diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
+index f6b1ff0..de773fb 100644
+--- a/arch/ia64/kernel/pci-dma.c
++++ b/arch/ia64/kernel/pci-dma.c
+@@ -43,7 +43,7 @@ struct device fallback_dev = {
+ .dma_mask = &fallback_dev.coherent_dma_mask,
+ };
+
+-extern struct dma_map_ops intel_dma_ops;
++extern const struct dma_map_ops intel_dma_ops;
+
+ static int __init pci_iommu_init(void)
+ {
+@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
+ }
+ EXPORT_SYMBOL(iommu_dma_supported);
+
++extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
++extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
++extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
++extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
++extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
++extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
++extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
++
++static const struct dma_map_ops intel_iommu_dma_ops = {
++ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
++ .alloc_coherent = intel_alloc_coherent,
++ .free_coherent = intel_free_coherent,
++ .map_sg = intel_map_sg,
++ .unmap_sg = intel_unmap_sg,
++ .map_page = intel_map_page,
++ .unmap_page = intel_unmap_page,
++ .mapping_error = intel_mapping_error,
++
++ .sync_single_for_cpu = machvec_dma_sync_single,
++ .sync_sg_for_cpu = machvec_dma_sync_sg,
++ .sync_single_for_device = machvec_dma_sync_single,
++ .sync_sg_for_device = machvec_dma_sync_sg,
++ .dma_supported = iommu_dma_supported,
++};
++
+ void __init pci_iommu_alloc(void)
+ {
+- dma_ops = &intel_dma_ops;
+-
+- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
+- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
+- dma_ops->sync_single_for_device = machvec_dma_sync_single;
+- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
+- dma_ops->dma_supported = iommu_dma_supported;
++ dma_ops = &intel_iommu_dma_ops;
+
+ /*
+ * The order of these functions is important for
+diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
+index 285aae8..61dbab6 100644
+--- a/arch/ia64/kernel/pci-swiotlb.c
++++ b/arch/ia64/kernel/pci-swiotlb.c
+@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+ }
+
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = ia64_swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
+index f178270..2dcff27 100644
+--- a/arch/ia64/kernel/perfmon.c
++++ b/arch/ia64/kernel/perfmon.c
+@@ -2372,7 +2372,6 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
+ */
+ insert_vm_struct(mm, vma);
+
+- mm->total_vm += size >> PAGE_SHIFT;
+ vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
+ vma_pages(vma));
+ up_write(&task->mm->mmap_sem);
+diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
+index 609d500..254a3d7 100644
+--- a/arch/ia64/kernel/sys_ia64.c
++++ b/arch/ia64/kernel/sys_ia64.c
+@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ if (REGION_NUMBER(addr) == RGN_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != mm->mmap_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = mm->mmap_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* Remember the address where we stopped this search: */
+ mm->free_area_cache = addr + len;
+ return addr;
+diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
+index 8f06035..b3a5818 100644
+--- a/arch/ia64/kernel/topology.c
++++ b/arch/ia64/kernel/topology.c
+@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
+ return ret;
+ }
+
+-static struct sysfs_ops cache_sysfs_ops = {
++static const struct sysfs_ops cache_sysfs_ops = {
+ .show = cache_show
+ };
+
+diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
+index 0a0c77b..8e55a81 100644
+--- a/arch/ia64/kernel/vmlinux.lds.S
++++ b/arch/ia64/kernel/vmlinux.lds.S
+@@ -190,7 +190,7 @@ SECTIONS
+ /* Per-cpu data: */
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ PERCPU_VADDR(PERCPU_ADDR, :percpu)
+- __phys_per_cpu_start = __per_cpu_load;
++ __phys_per_cpu_start = per_cpu_load;
+ . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
+ * into percpu page size
+ */
+diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
+index 19261a9..1611b7a 100644
+--- a/arch/ia64/mm/fault.c
++++ b/arch/ia64/mm/fault.c
+@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void __kprobes
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
+ mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
+index b0f6157..0a3ade5 100644
+--- a/arch/ia64/mm/hugetlbpage.c
++++ b/arch/ia64/mm/hugetlbpage.c
+@@ -150,6 +150,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct *vmm;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
+
+ if (len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+@@ -172,7 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
+ return -ENOMEM;
+- if (!vmm || (addr + len) <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, &addr, len, offset))
+ return addr;
+ addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
+ }
+diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
+index 1857766..05cc6a3 100644
+--- a/arch/ia64/mm/init.c
++++ b/arch/ia64/mm/init.c
+@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
+ vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
+ vma->vm_end = vma->vm_start + PAGE_SIZE;
+ vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
++ vma->vm_flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->mm->pax_flags & MF_PAX_MPROTECT)
++ vma->vm_flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
+index 98b6849..8046766 100644
+--- a/arch/ia64/sn/pci/pci_dma.c
++++ b/arch/ia64/sn/pci/pci_dma.c
+@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
+ return ret;
+ }
+
+-static struct dma_map_ops sn_dma_ops = {
++static const struct dma_map_ops sn_dma_ops = {
+ .alloc_coherent = sn_dma_alloc_coherent,
+ .free_coherent = sn_dma_free_coherent,
+ .map_page = sn_dma_map_page,
+diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
+index 40b3ee9..8c2c112 100644
+--- a/arch/m32r/include/asm/cache.h
++++ b/arch/m32r/include/asm/cache.h
+@@ -1,8 +1,10 @@
+ #ifndef _ASM_M32R_CACHE_H
+ #define _ASM_M32R_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_M32R_CACHE_H */
+diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
+index 82abd15..d95ae5d 100644
+--- a/arch/m32r/lib/usercopy.c
++++ b/arch/m32r/lib/usercopy.c
+@@ -14,6 +14,9 @@
+ unsigned long
+ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetch(from);
+ if (access_ok(VERIFY_WRITE, to, n))
+ __copy_user(to,from,n);
+@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+ unsigned long
+ __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ prefetchw(to);
+ if (access_ok(VERIFY_READ, from, n))
+ __copy_user_zeroing(to,from,n);
+diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
+index ecafbe1..432c3e4 100644
+--- a/arch/m68k/include/asm/cache.h
++++ b/arch/m68k/include/asm/cache.h
+@@ -4,9 +4,11 @@
+ #ifndef __ARCH_M68K_CACHE_H
+ #define __ARCH_M68K_CACHE_H
+
++#include <linux/const.h>
++
+ /* bytes per L1 cache line */
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+
+diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
+index c209c47..2ba96e2 100644
+--- a/arch/microblaze/include/asm/cache.h
++++ b/arch/microblaze/include/asm/cache.h
+@@ -13,11 +13,12 @@
+ #ifndef _ASM_MICROBLAZE_CACHE_H
+ #define _ASM_MICROBLAZE_CACHE_H
+
++#include <linux/const.h>
+ #include <asm/registers.h>
+
+ #define L1_CACHE_SHIFT 2
+ /* word-granular cache in microblaze */
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index fd7620f..63d73a6 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -5,6 +5,7 @@ config MIPS
+ select HAVE_IDE
+ select HAVE_OPROFILE
+ select HAVE_ARCH_KGDB
++ select GENERIC_ATOMIC64 if !64BIT
+ # Horrible source of confusion. Die, die, die ...
+ select EMBEDDED
+ select RTC_LIB if !LEMOTE_FULOONG2E
+diff --git a/arch/mips/Makefile b/arch/mips/Makefile
+index 57ff855..b603951 100644
+--- a/arch/mips/Makefile
++++ b/arch/mips/Makefile
+@@ -51,6 +51,8 @@ endif
+ cflags-y := -ffunction-sections
+ cflags-y += $(call cc-option, -mno-check-zero-division)
+
++cflags-y += -Wno-sign-compare -Wno-extra
++
+ ifdef CONFIG_32BIT
+ ld-emul = $(32bit-emul)
+ vmlinux-32 = vmlinux
+diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
+index 632f986..fd0378d 100644
+--- a/arch/mips/alchemy/devboards/pm.c
++++ b/arch/mips/alchemy/devboards/pm.c
+@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
+
+ }
+
+-static struct platform_suspend_ops db1x_pm_ops = {
++static const struct platform_suspend_ops db1x_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .begin = db1x_pm_begin,
+ .enter = db1x_pm_enter,
+diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
+index 09e7128..111035b 100644
+--- a/arch/mips/include/asm/atomic.h
++++ b/arch/mips/include/asm/atomic.h
+@@ -21,6 +21,10 @@
+ #include <asm/war.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_GENERIC_ATOMIC64
++#include <asm-generic/atomic64.h>
++#endif
++
+ #define ATOMIC_INIT(i) { (i) }
+
+ /*
+@@ -782,6 +786,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ */
+ #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #endif /* CONFIG_64BIT */
+
+ /*
+diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
+index 37f175c..c7a3065 100644
+--- a/arch/mips/include/asm/cache.h
++++ b/arch/mips/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #ifndef _ASM_CACHE_H
+ #define _ASM_CACHE_H
+
++#include <linux/const.h>
+ #include <kmalloc.h>
+
+ #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
+index 7990694..4e93acf 100644
+--- a/arch/mips/include/asm/elf.h
++++ b/arch/mips/include/asm/elf.h
+@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #endif /* _ASM_ELF_H */
+diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
+index f266295..627cfff 100644
+--- a/arch/mips/include/asm/page.h
++++ b/arch/mips/include/asm/page.h
+@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
+ #ifdef CONFIG_CPU_MIPS32
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
+ #else
+ typedef struct { unsigned long long pte; } pte_t;
+ #define pte_val(x) ((x).pte)
+diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
+index 3738f4b..9020d86 100644
+--- a/arch/mips/include/asm/pgalloc.h
++++ b/arch/mips/include/asm/pgalloc.h
+@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ set_pud(pud, __pud((unsigned long)pmd));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
+ #endif
+
+ /*
+diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
+index e48c0bf..f3acf65 100644
+--- a/arch/mips/include/asm/reboot.h
++++ b/arch/mips/include/asm/reboot.h
+@@ -9,7 +9,7 @@
+ #ifndef _ASM_REBOOT_H
+ #define _ASM_REBOOT_H
+
+-extern void (*_machine_restart)(char *command);
+-extern void (*_machine_halt)(void);
++extern void (*__noreturn _machine_restart)(char *command);
++extern void (*__noreturn _machine_halt)(void);
+
+ #endif /* _ASM_REBOOT_H */
+diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
+index 83b5509..9fa24a23 100644
+--- a/arch/mips/include/asm/system.h
++++ b/arch/mips/include/asm/system.h
+@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
+ */
+ #define __ARCH_WANT_UNLOCKED_CTXSW
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ #endif /* _ASM_SYSTEM_H */
+diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
+index 0e50757..4e9432c 100644
+--- a/arch/mips/include/asm/thread_info.h
++++ b/arch/mips/include/asm/thread_info.h
+@@ -120,6 +120,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
+ #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
+ #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
+ #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
++/* li takes a 32bit immediate */
++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
+ #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
+
+ #ifdef CONFIG_MIPS32_O32
+@@ -144,11 +146,14 @@ register struct thread_info *__current_thread_info __asm__("$28");
+ #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
+ #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
+ #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
++
++#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK (0x0000ffef & ~_TIF_SECCOMP)
+ /* work to do on any return to u-space */
+-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
++#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
+
+ #endif /* __KERNEL__ */
+
+diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
+index 9fdd8bc..fcf9d68 100644
+--- a/arch/mips/kernel/binfmt_elfn32.c
++++ b/arch/mips/kernel/binfmt_elfn32.c
+@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/elfcore.h>
+diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
+index ff44823..cf0b48a 100644
+--- a/arch/mips/kernel/binfmt_elfo32.c
++++ b/arch/mips/kernel/binfmt_elfo32.c
+@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+
+ /*
+diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
+index ffa3310..f8b1e06 100644
+--- a/arch/mips/kernel/entry.S
++++ b/arch/mips/kernel/entry.S
+@@ -167,7 +167,7 @@ work_notifysig: # deal with pending signals and
+ FEXPORT(syscall_exit_work_partial)
+ SAVE_STATIC
+ syscall_exit_work:
+- li t0, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t0, _TIF_SYSCALL_WORK
+ and t0, a2 # a2 is preloaded with TI_FLAGS
+ beqz t0, work_pending # trace bit set?
+ local_irq_enable # could let do_syscall_trace()
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index 50c9bb8..efdd5f8 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ return -1;
+ }
+
++/* cannot be const */
+ struct kgdb_arch arch_kgdb_ops;
+
+ /*
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index f3d73e1..bb3f57a 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
+ out:
+ return pc;
+ }
+-
+-/*
+- * Don't forget that the stack pointer must be aligned on a 8 bytes
+- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
+- */
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+-
+- return sp & ALMASK;
+-}
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 054861c..ddbbc7d 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -558,6 +558,10 @@ static inline int audit_arch(void)
+ return arch;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+@@ -568,6 +572,11 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
+ if (!entryexit)
+ secure_computing(regs->regs[0]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(current->audit_context) && entryexit)
+ audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
+ regs->regs[2]);
+diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
+index 060563a..7fbf310 100644
+--- a/arch/mips/kernel/reset.c
++++ b/arch/mips/kernel/reset.c
+@@ -19,8 +19,8 @@
+ * So handle all using function pointers to machine specific
+ * functions.
+ */
+-void (*_machine_restart)(char *command);
+-void (*_machine_halt)(void);
++void (*__noreturn _machine_restart)(char *command);
++void (*__noreturn _machine_halt)(void);
+ void (*pm_power_off)(void);
+
+ EXPORT_SYMBOL(pm_power_off);
+@@ -29,16 +29,19 @@ void machine_restart(char *command)
+ {
+ if (_machine_restart)
+ _machine_restart(command);
++ BUG();
+ }
+
+ void machine_halt(void)
+ {
+ if (_machine_halt)
+ _machine_halt();
++ BUG();
+ }
+
+ void machine_power_off(void)
+ {
+ if (pm_power_off)
+ pm_power_off();
++ BUG();
+ }
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index fd2a9bb..73ecc89 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+
+ stack_done:
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ and t0, t1
+ bnez t0, syscall_trace_entry # -> yes
+
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index 18bf7f3..6659dde 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 6ebc079..a16f976 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, n32_syscall_trace_entry
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 14dde4c..dc68acf 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
+ PTR 4b, bad_stack
+ .previous
+
+- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
++ li t1, _TIF_SYSCALL_WORK
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, trace_a_syscall
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index 3f7f466..311c777 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -78,6 +78,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct * vmm;
+ int do_color_align;
+ unsigned long task_size;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ task_size = STACK_TOP;
+
+@@ -102,17 +103,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+ vmm = find_vma(current->mm, addr);
+- if (task_size - len >= addr &&
+- (!vmm || addr + len <= vmm->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
+ return addr;
+ }
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+@@ -122,7 +127,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ /* At this point: (!vmm || addr < vmm->vm_end). */
+ if (task_size - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, addr, len, offset))
+ return addr;
+ addr = vmm->vm_end;
+ if (do_color_align)
+diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
+index e97a7a2..f18f5b0 100644
+--- a/arch/mips/mm/fault.c
++++ b/arch/mips/mm/fault.c
+@@ -26,6 +26,23 @@
+ #include <asm/ptrace.h>
+ #include <asm/highmem.h> /* For VMALLOC_END */
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+index bdc1f9a..e8de5c5 100644
+--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
++++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
+@@ -11,12 +11,14 @@
+ #ifndef _ASM_PROC_CACHE_H
+ #define _ASM_PROC_CACHE_H
+
++#include <linux/const.h>
++
+ /* L1 cache */
+
+ #define L1_CACHE_NWAYS 4 /* number of ways in caches */
+ #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
+-#define L1_CACHE_BYTES 16 /* bytes per entry */
+ #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
+ #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
+
+ #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
+diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
+index 6ee459d..71c1831 100644
+--- a/arch/parisc/include/asm/atomic.h
++++ b/arch/parisc/include/asm/atomic.h
+@@ -336,6 +336,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #else /* CONFIG_64BIT */
+
+ #include <asm-generic/atomic64.h>
+diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
+index 32c2cca..a7b3a64 100644
+--- a/arch/parisc/include/asm/cache.h
++++ b/arch/parisc/include/asm/cache.h
+@@ -5,6 +5,7 @@
+ #ifndef __ARCH_PARISC_CACHE_H
+ #define __ARCH_PARISC_CACHE_H
+
++#include <linux/const.h>
+
+ /*
+ * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
+@@ -15,13 +16,13 @@
+ * just ruin performance.
+ */
+ #ifdef CONFIG_PA20
+-#define L1_CACHE_BYTES 64
+ #define L1_CACHE_SHIFT 6
+ #else
+-#define L1_CACHE_BYTES 32
+ #define L1_CACHE_SHIFT 5
+ #endif
+
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
++
+ #ifndef __ASSEMBLY__
+
+ #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
+index 9c802eb..0592e41 100644
+--- a/arch/parisc/include/asm/elf.h
++++ b/arch/parisc/include/asm/elf.h
+@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index fc987a1..6e068ef 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+ (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
++{
++ pgd_populate(mm, pgd, pmd);
++}
++
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+ pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+ #define pmd_free(mm, x) do { } while (0)
+ #define pgd_populate(mm, pmd, pte) BUG()
++#define pgd_populate_kernel(mm, pmd, pte) BUG()
+
+ #endif
+
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index a27d2e2..18fd845 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -207,6 +207,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index 994bcd9..f25247a 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -393,7 +393,7 @@ EXPORT_SYMBOL(print_pci_hwpath);
+ static void setup_bus_id(struct parisc_device *padev)
+ {
+ struct hardware_path path;
+- char name[20];
++ char name[28];
+ char *output = name;
+ int i;
+
+diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
+index 2120746..8d70a5e 100644
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -95,16 +95,38 @@
+
+ /* three functions to determine where in the module core
+ * or init pieces the location is */
++static inline int in_init_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rx &&
++ loc < (me->module_init_rx + me->init_size_rx));
++}
++
++static inline int in_init_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_init_rw &&
++ loc < (me->module_init_rw + me->init_size_rw));
++}
++
+ static inline int in_init(struct module *me, void *loc)
+ {
+- return (loc >= me->module_init &&
+- loc <= (me->module_init + me->init_size));
++ return in_init_rx(me, loc) || in_init_rw(me, loc);
++}
++
++static inline int in_core_rx(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rx &&
++ loc < (me->module_core_rx + me->core_size_rx));
++}
++
++static inline int in_core_rw(struct module *me, void *loc)
++{
++ return (loc >= me->module_core_rw &&
++ loc < (me->module_core_rw + me->core_size_rw));
+ }
+
+ static inline int in_core(struct module *me, void *loc)
+ {
+- return (loc >= me->module_core &&
+- loc <= (me->module_core + me->core_size));
++ return in_core_rx(me, loc) || in_core_rw(me, loc);
+ }
+
+ static inline int in_local(struct module *me, void *loc)
+@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
+ }
+
+ /* align things a bit */
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.got_offset = me->core_size;
+- me->core_size += gots * sizeof(struct got_entry);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += gots * sizeof(struct got_entry);
+
+- me->core_size = ALIGN(me->core_size, 16);
+- me->arch.fdesc_offset = me->core_size;
+- me->core_size += fdescs * sizeof(Elf_Fdesc);
++ me->core_size_rw = ALIGN(me->core_size_rw, 16);
++ me->arch.fdesc_offset = me->core_size_rw;
++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
+
+ me->arch.got_max = gots;
+ me->arch.fdesc_max = fdescs;
+@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+
+ BUG_ON(value == 0);
+
+- got = me->module_core + me->arch.got_offset;
++ got = me->module_core_rw + me->arch.got_offset;
+ for (i = 0; got[i].addr; i++)
+ if (got[i].addr == value)
+ goto out;
+@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
+ #ifdef CONFIG_64BIT
+ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+ {
+- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
+
+ if (!value) {
+ printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
+@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
+
+ /* Create new one */
+ fdesc->addr = value;
+- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+ return (Elf_Addr)fdesc;
+ }
+ #endif /* CONFIG_64BIT */
+@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
+
+ table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
+ end = table + sechdrs[me->arch.unwind_section].sh_size;
+- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
+
+ DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
+ me->arch.unwind_section, table, end, gp);
+diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
+index cb71f3d..306f0c0 100644
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -68,7 +68,8 @@ void __init setup_cmdline(char **cmdline_p)
+ /* called from hpux boot loader */
+ boot_command_line[0] = '\0';
+ } else {
+- strcpy(boot_command_line, (char *)__va(boot_args[1]));
++ strlcpy(boot_command_line, (char *)__va(boot_args[1]),
++ COMMAND_LINE_SIZE);
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
+diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
+index 9147391..1c99b82 100644
+--- a/arch/parisc/kernel/sys_parisc.c
++++ b/arch/parisc/kernel/sys_parisc.c
+@@ -33,9 +33,11 @@
+ #include <linux/utsname.h>
+ #include <linux/personality.h>
+
+-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
++static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
++ unsigned long flags)
+ {
+ struct vm_area_struct *vma;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ addr = PAGE_ALIGN(addr);
+
+@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ addr = vma->vm_end;
+ }
+@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
+ return offset & 0x3FF000;
+ }
+
+-static unsigned long get_shared_area(struct address_space *mapping,
+- unsigned long addr, unsigned long len, unsigned long pgoff)
++static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
++ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct *vma;
+ int offset = mapping ? get_offset(mapping) : 0;
++ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ addr = DCACHE_ALIGN(addr - offset) + offset;
+
+@@ -79,7 +82,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vma || addr + len <= vma->vm_start)
++ if (check_heap_stack_gap(vma, &addr, len, rand_offset))
+ return addr;
+ addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
+ if (addr < vma->vm_end) /* handle wraparound */
+@@ -98,14 +101,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ if (flags & MAP_FIXED)
+ return addr;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (filp) {
+- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
++ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
+ } else if(flags & MAP_SHARED) {
+- addr = get_shared_area(NULL, addr, len, pgoff);
++ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
+ } else {
+- addr = get_unshared_area(addr, len);
++ addr = get_unshared_area(filp, addr, len, flags);
+ }
+ return addr;
+ }
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 8b58bf0..7afff03 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index c6afbfc..c5839f6 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int *)addr);
++ err |= get_user(bv, (unsigned int *)(addr+4));
++ err |= get_user(ldw2, (unsigned int *)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int fixup_exception(struct pt_regs *regs)
+ {
+ const struct exception_table_entry *fix;
+@@ -192,8 +303,33 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index c107b74..409dc0f 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
+ KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
+ CPP = $(CC) -E $(KBUILD_CFLAGS)
+
++cflags-y += -Wno-sign-compare -Wno-extra
++
+ CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
+
+ ifeq ($(CONFIG_PPC64),y)
+diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
+index 81de6eb..d5d0e24 100644
+--- a/arch/powerpc/include/asm/cache.h
++++ b/arch/powerpc/include/asm/cache.h
+@@ -3,6 +3,7 @@
+
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+
+ /* bytes per L1 cache line */
+ #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+@@ -18,7 +19,7 @@
+ #define L1_CACHE_SHIFT 7
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
+index 6d94d27..50d4cad 100644
+--- a/arch/powerpc/include/asm/device.h
++++ b/arch/powerpc/include/asm/device.h
+@@ -14,7 +14,7 @@ struct dev_archdata {
+ struct device_node *of_node;
+
+ /* DMA operations on that device */
+- struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+
+ /*
+ * When an iommu is in use, dma_data is used as a ptr to the base of the
+diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
+index e281dae..2b8a784 100644
+--- a/arch/powerpc/include/asm/dma-mapping.h
++++ b/arch/powerpc/include/asm/dma-mapping.h
+@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
+ #ifdef CONFIG_PPC64
+ extern struct dma_map_ops dma_iommu_ops;
+ #endif
+-extern struct dma_map_ops dma_direct_ops;
++extern const struct dma_map_ops dma_direct_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ /* We don't handle the NULL dev case for ISA for now. We could
+ * do it via an out of line call but it is not needed for now. The
+@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ return dev->archdata.dma_ops;
+ }
+
+-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
+ {
+ dev->archdata.dma_ops = ops;
+ }
+@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+
+ static inline int dma_supported(struct device *dev, u64 mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
+
+ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return -EIO;
+@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!dma_ops);
+@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ BUG_ON(!dma_ops);
+
+@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
+
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
++ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index 5698502..5db093c 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+-extern unsigned long randomize_et_dyn(unsigned long base);
+-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
++#define ELF_ET_DYN_BASE (0x20000000)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
++
++#ifdef __powerpc64__
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
++#else
++#define PAX_DELTA_MMAP_LEN 15
++#define PAX_DELTA_STACK_LEN 15
++#endif
++#endif
+
+ /*
+ * Our registers are always unsigned longs, whether we're a 32 bit
+@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ (0x7ff >> (PAGE_SHIFT - 12)) : \
+ (0x3ffff >> (PAGE_SHIFT - 12)))
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* __KERNEL__ */
+
+ /*
+diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
+index edfc980..1766f59 100644
+--- a/arch/powerpc/include/asm/iommu.h
++++ b/arch/powerpc/include/asm/iommu.h
+@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
+ extern void iommu_init_early_dart(void);
+ extern void iommu_init_early_pasemi(void);
+
++/* dma-iommu.c */
++extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
++
+ #ifdef CONFIG_PCI
+ extern void pci_iommu_init(void);
+ extern void pci_direct_iommu_init(void);
+diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
+index 9163695..5a00112 100644
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ b/arch/powerpc/include/asm/kmap_types.h
+@@ -26,6 +26,7 @@ enum km_type {
+ KM_SOFTIRQ1,
+ KM_PPC_SYNC_PAGE,
+ KM_PPC_SYNC_ICACHE,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
+index ff24254..fe45b21 100644
+--- a/arch/powerpc/include/asm/page.h
++++ b/arch/powerpc/include/asm/page.h
+@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
+ * and needs to be executable. This means the whole heap ends
+ * up being executable.
+ */
+-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_DATA_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
+ #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+ #endif
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ #undef STRICT_MM_TYPECHECKS
+diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
+index 3f17b83..1f9e766 100644
+--- a/arch/powerpc/include/asm/page_64.h
++++ b/arch/powerpc/include/asm/page_64.h
+@@ -180,15 +180,18 @@ do { \
+ * stack by default, so in the absense of a PT_GNU_STACK program header
+ * we turn execute permission off.
+ */
+-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
+- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++#define VM_STACK_DEFAULT_FLAGS32 \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_32BIT) ? \
+ VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
++#endif
+
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
+index b5ea626..40308222 100644
+--- a/arch/powerpc/include/asm/pci.h
++++ b/arch/powerpc/include/asm/pci.h
+@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+ }
+
+ #ifdef CONFIG_PCI
+-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
+-extern struct dma_map_ops *get_pci_dma_ops(void);
++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
++extern const struct dma_map_ops *get_pci_dma_ops(void);
+ #else /* CONFIG_PCI */
+ #define set_pci_dma_ops(d)
+ #define get_pci_dma_ops() NULL
+diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
+index e6f069c..6c7a1f8 100644
+--- a/arch/powerpc/include/asm/pgalloc-64.h
++++ b/arch/powerpc/include/asm/pgalloc-64.h
+@@ -37,6 +37,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ #ifndef CONFIG_PPC_64K_PAGES
+
+ #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
++#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
+
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+@@ -54,6 +55,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ pud_set(pud, (unsigned long)pmd);
+ }
+
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ pud_populate(mm, pud, pmd);
++}
++
+ #define pmd_populate(mm, pmd, pte_page) \
+ pmd_populate_kernel(mm, pmd, page_address(pte_page))
+ #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+@@ -63,6 +69,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ #else /* CONFIG_PPC_64K_PAGES */
+
+ #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+ pte_t *pte)
+diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
+index 2a5da06..d65bea2 100644
+--- a/arch/powerpc/include/asm/pgtable.h
++++ b/arch/powerpc/include/asm/pgtable.h
+@@ -2,6 +2,7 @@
+ #define _ASM_POWERPC_PGTABLE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h> /* For TASK_SIZE */
+ #include <asm/mmu.h>
+diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
+index 4aad413..85d86bf 100644
+--- a/arch/powerpc/include/asm/pte-hash32.h
++++ b/arch/powerpc/include/asm/pte-hash32.h
+@@ -21,6 +21,7 @@
+ #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
+ #define _PAGE_USER 0x004 /* usermode access allowed */
+ #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
++#define _PAGE_EXEC _PAGE_GUARDED
+ #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+ #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+ #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
+index 8c34149..78f425a 100644
+--- a/arch/powerpc/include/asm/ptrace.h
++++ b/arch/powerpc/include/asm/ptrace.h
+@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
+ } while(0)
+
+ struct task_struct;
+-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
++extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
+ extern int ptrace_put_reg(struct task_struct *task, int regno,
+ unsigned long data);
+
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index 6ce44dd..e054dcc 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -191,6 +191,7 @@
+ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
+ #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
+ #define DSISR_NOHPTE 0x40000000 /* no translation found */
++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
+ #define DSISR_PROTFAULT 0x08000000 /* protection fault */
+ #define DSISR_ISSTORE 0x02000000 /* access was a store */
+ #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
+diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
+index 8979d4c..d2fd0d3 100644
+--- a/arch/powerpc/include/asm/swiotlb.h
++++ b/arch/powerpc/include/asm/swiotlb.h
+@@ -13,7 +13,7 @@
+
+ #include <linux/swiotlb.h>
+
+-extern struct dma_map_ops swiotlb_dma_ops;
++extern const struct dma_map_ops swiotlb_dma_ops;
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
+index 094a12a..877a60a 100644
+--- a/arch/powerpc/include/asm/system.h
++++ b/arch/powerpc/include/asm/system.h
+@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
+ #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ /* Used in very early kernel initialization. */
+ extern unsigned long reloc_offset(void);
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index aa9d383..0380a05 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -110,7 +110,9 @@ static inline struct thread_info *current_thread_info(void)
+ #define TIF_NOERROR 12 /* Force successful syscall return */
+ #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
+ #define TIF_FREEZE 14 /* Freezing for suspend */
+-#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
++/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
++#define TIF_GRSEC_SETXID 15 /* update credentials on syscall entry/exit */
++#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
+
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+@@ -128,7 +130,10 @@ static inline struct thread_info *current_thread_info(void)
+ #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+ #define _TIF_FREEZE (1<<TIF_FREEZE)
+ #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
+-#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
++
++#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT| \
++ _TIF_SECCOMP|_TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_RESUME)
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index bd0fb84..1f2d065 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -327,52 +327,6 @@ do { \
+ extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+-#ifndef __powerpc64__
+-
+-static inline unsigned long copy_from_user(void *to,
+- const void __user *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_READ, from, n))
+- return __copy_tofrom_user((__force void __user *)to, from, n);
+- if ((unsigned long)from < TASK_SIZE) {
+- over = (unsigned long)from + n - TASK_SIZE;
+- return __copy_tofrom_user((__force void __user *)to, from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-static inline unsigned long copy_to_user(void __user *to,
+- const void *from, unsigned long n)
+-{
+- unsigned long over;
+-
+- if (access_ok(VERIFY_WRITE, to, n))
+- return __copy_tofrom_user(to, (__force void __user *)from, n);
+- if ((unsigned long)to < TASK_SIZE) {
+- over = (unsigned long)to + n - TASK_SIZE;
+- return __copy_tofrom_user(to, (__force void __user *)from,
+- n - over) + over;
+- }
+- return n;
+-}
+-
+-#else /* __powerpc64__ */
+-
+-#define __copy_in_user(to, from, size) \
+- __copy_tofrom_user((to), (from), (size))
+-
+-extern unsigned long copy_from_user(void *to, const void __user *from,
+- unsigned long n);
+-extern unsigned long copy_to_user(void __user *to, const void *from,
+- unsigned long n);
+-extern unsigned long copy_in_user(void __user *to, const void __user *from,
+- unsigned long n);
+-
+-#endif /* __powerpc64__ */
+-
+ static inline unsigned long __copy_from_user_inatomic(void *to,
+ const void __user *from, unsigned long n)
+ {
+@@ -396,6 +350,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+@@ -422,6 +380,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
+ if (ret == 0)
+ return 0;
+ }
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+@@ -439,6 +401,92 @@ static inline unsigned long __copy_to_user(void __user *to,
+ return __copy_to_user_inatomic(to, from, size);
+ }
+
++#ifndef __powerpc64__
++
++static inline unsigned long __must_check copy_from_user(void *to,
++ const void __user *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_READ, from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++ return __copy_tofrom_user((__force void __user *)to, from, n);
++ }
++ if ((unsigned long)from < TASK_SIZE) {
++ over = (unsigned long)from + n - TASK_SIZE;
++ if (!__builtin_constant_p(n - over))
++ check_object_size(to, n - over, false);
++ return __copy_tofrom_user((__force void __user *)to, from,
++ n - over) + over;
++ }
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to,
++ const void *from, unsigned long n)
++{
++ unsigned long over;
++
++ if ((long)n < 0)
++ return n;
++
++ if (access_ok(VERIFY_WRITE, to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ return __copy_tofrom_user(to, (__force void __user *)from, n);
++ }
++ if ((unsigned long)to < TASK_SIZE) {
++ over = (unsigned long)to + n - TASK_SIZE;
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n - over, true);
++ return __copy_tofrom_user(to, (__force void __user *)from,
++ n - over) + over;
++ }
++ return n;
++}
++
++#else /* __powerpc64__ */
++
++#define __copy_in_user(to, from, size) \
++ __copy_tofrom_user((to), (from), (size))
++
++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
++
++ if (likely(access_ok(VERIFY_READ, from, n)))
++ n = __copy_from_user(to, from, n);
++ else
++ memset(to, 0, n);
++ return n;
++}
++
++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ if ((long)n < 0 || n > INT_MAX)
++ return n;
++
++ if (likely(access_ok(VERIFY_WRITE, to, n))) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++ n = __copy_to_user(to, from, n);
++ }
++ return n;
++}
++
++extern unsigned long copy_in_user(void __user *to, const void __user *from,
++ unsigned long n);
++
++#endif /* __powerpc64__ */
++
+ extern unsigned long __clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index bb37b1d..01fe9ce 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
+ &cache_assoc_attr,
+ };
+
+-static struct sysfs_ops cache_index_ops = {
++static const struct sysfs_ops cache_index_ops = {
+ .show = cache_index_show,
+ };
+
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index 37771a5..648530c 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ }
+
+ /* We support DMA to/from any memory page via the iommu */
+-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
++int dma_iommu_dma_supported(struct device *dev, u64 mask)
+ {
+ struct iommu_table *tbl = get_iommu_table_base(dev);
+
+diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
+index e96cbbd..bdd6d41 100644
+--- a/arch/powerpc/kernel/dma-swiotlb.c
++++ b/arch/powerpc/kernel/dma-swiotlb.c
+@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+-struct dma_map_ops swiotlb_dma_ops = {
++const struct dma_map_ops swiotlb_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
+index 6215062..ebea59c 100644
+--- a/arch/powerpc/kernel/dma.c
++++ b/arch/powerpc/kernel/dma.c
+@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
+ }
+ #endif
+
+-struct dma_map_ops dma_direct_ops = {
++const struct dma_map_ops dma_direct_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = dma_direct_map_sg,
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 24dcc0e..a300455 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -455,6 +455,7 @@ storage_fault_common:
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ mr r4,r14
+ mr r5,r15
+ ld r14,PACA_EXGEN+EX_R14(r13)
+@@ -464,8 +465,7 @@ storage_fault_common:
+ cmpdi r3,0
+ bne- 1f
+ b .ret_from_except_lite
+-1: bl .save_nvgprs
+- mr r5,r3
++1: mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .bad_page_fault
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 1808876..9fd206a 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -818,10 +818,10 @@ handle_page_fault:
+ 11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
++ bl .save_nvgprs
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+- bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
+index a4c8b38..1b09ad9 100644
+--- a/arch/powerpc/kernel/ibmebus.c
++++ b/arch/powerpc/kernel/ibmebus.c
+@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
+ return 1;
+ }
+
+-static struct dma_map_ops ibmebus_dma_ops = {
++static const struct dma_map_ops ibmebus_dma_ops = {
+ .alloc_coherent = ibmebus_alloc_coherent,
+ .free_coherent = ibmebus_free_coherent,
+ .map_sg = ibmebus_map_sg,
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index 8564a41..67f3471 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -490,9 +490,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
+ host->ops = ops;
+ host->of_node = of_node_get(of_node);
+
+- if (host->ops->match == NULL)
+- host->ops->match = default_irq_host_match;
+-
+ spin_lock_irqsave(&irq_big_lock, flags);
+
+ /* If it's a legacy controller, check for duplicates and
+@@ -567,7 +564,12 @@ struct irq_host *irq_find_host(struct device_node *node)
+ */
+ spin_lock_irqsave(&irq_big_lock, flags);
+ list_for_each_entry(h, &irq_hosts, link)
+- if (h->ops->match(h, node)) {
++ if (h->ops->match) {
++ if (h->ops->match(h, node)) {
++ found = h;
++ break;
++ }
++ } else if (default_irq_host_match(h, node)) {
+ found = h;
+ break;
+ }
+diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
+index 641c74b..8339ad7 100644
+--- a/arch/powerpc/kernel/kgdb.c
++++ b/arch/powerpc/kernel/kgdb.c
+@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
+ if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+ return 0;
+
+- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
++ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+ regs->nip += 4;
+
+ return 1;
+@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+ };
+
+diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
+index 477c663..4f50234 100644
+--- a/arch/powerpc/kernel/module.c
++++ b/arch/powerpc/kernel/module.c
+@@ -31,11 +31,24 @@
+
+ LIST_HEAD(module_bug_list);
+
++#ifdef CONFIG_PAX_KERNEXEC
+ void *module_alloc(unsigned long size)
+ {
+ if (size == 0)
+ return NULL;
+
++ return vmalloc(size);
++}
++
++void *module_alloc_exec(unsigned long size)
++#else
++void *module_alloc(unsigned long size)
++#endif
++
++{
++ if (size == 0)
++ return NULL;
++
+ return vmalloc_exec(size);
+ }
+
+@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
+ vfree(module_region);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++void module_free_exec(struct module *mod, void *module_region)
++{
++ module_free(mod, module_region);
++}
++#endif
++
+ static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
+index 449a7e0..5cb1140 100644
+--- a/arch/powerpc/kernel/module_32.c
++++ b/arch/powerpc/kernel/module_32.c
+@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
+ me->arch.core_plt_section = i;
+ }
+ if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
+- printk("Module doesn't contain .plt or .init.plt sections.\n");
++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
+ return -ENOEXEC;
+ }
+
+@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
+
+ DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
+ /* Init, or core PLT? */
+- if (location >= mod->module_core
+- && location < mod->module_core + mod->core_size)
++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
+ entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
+- else
++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
+ entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
++ else {
++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
++ return ~0UL;
++ }
+
+ /* Find this entry, or if that fails, the next avail. entry */
+ while (entry->jump[0]) {
+diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
+index cadbed6..b9bbb00 100644
+--- a/arch/powerpc/kernel/pci-common.c
++++ b/arch/powerpc/kernel/pci-common.c
+@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
+ unsigned int ppc_pci_flags = 0;
+
+
+-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+
+-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
++void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+ {
+ pci_dma_ops = dma_ops;
+ }
+
+-struct dma_map_ops *get_pci_dma_ops(void)
++const struct dma_map_ops *get_pci_dma_ops(void)
+ {
+ return pci_dma_ops;
+ }
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 7b816da..8d5c277 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
+ * Lookup NIP late so we have the best change of getting the
+ * above info out without failing
+ */
+- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
++ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
++ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
+ #endif
+ show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (!user_mode(regs))
+@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ newsp = stack[0];
+ ip = stack[STACK_FRAME_LR_SAVE];
+ if (!firstframe || ip != lr) {
+- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
++ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ if ((ip == rth || ip == mrth) && curr_frame >= 0) {
+- printk(" (%pS)",
++ printk(" (%pA)",
+ (void *)current->ret_stack[curr_frame].ret);
+ curr_frame--;
+ }
+@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
+ struct pt_regs *regs = (struct pt_regs *)
+ (sp + STACK_FRAME_OVERHEAD);
+ lr = regs->link;
+- printk("--- Exception: %lx at %pS\n LR = %pS\n",
++ printk("--- Exception: %lx at %pA\n LR = %pA\n",
+ regs->trap, (void *)regs->nip, (void *)lr);
+ firstframe = 1;
+ }
+@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
+ }
+
+ #endif /* THREAD_SHIFT < PAGE_SHIFT */
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() & ~PAGE_MASK;
+- return sp & ~0xf;
+-}
+-
+-static inline unsigned long brk_rnd(void)
+-{
+- unsigned long rnd = 0;
+-
+- /* 8MB for 32bit, 1GB for 64bit */
+- if (is_32bit_task())
+- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+- else
+- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+-
+- return rnd << PAGE_SHIFT;
+-}
+-
+-unsigned long arch_randomize_brk(struct mm_struct *mm)
+-{
+- unsigned long base = mm->brk;
+- unsigned long ret;
+-
+-#ifdef CONFIG_PPC_STD_MMU_64
+- /*
+- * If we are using 1TB segments and we are allowed to randomise
+- * the heap, we can put it above 1TB so it is backed by a 1TB
+- * segment. Otherwise the heap will be in the bottom 1TB
+- * which always uses 256MB segments and this may result in a
+- * performance penalty.
+- */
+- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
+-#endif
+-
+- ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < mm->brk)
+- return mm->brk;
+-
+- return ret;
+-}
+-
+-unsigned long randomize_et_dyn(unsigned long base)
+-{
+- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
+-
+- if (ret < base)
+- return base;
+-
+- return ret;
+-}
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index ef14988..8a37ddb 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
+ /*
+ * Get contents of register REGNO in task TASK.
+ */
+-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
++unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
+ {
+ if (task->thread.regs == NULL)
+ return -EIO;
+@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+
+ CHECK_FULL_REGS(child->thread.regs);
+ if (index < PT_FPR0) {
+- tmp = ptrace_get_reg(child, (int) index);
++ tmp = ptrace_get_reg(child, index);
+ } else {
+ flush_fp_to_thread(child);
+ tmp = ((unsigned long *)child->thread.fpr)
+@@ -1033,6 +1033,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ /*
+ * We must return the syscall number to actually look up in the table.
+ * This can be -1L to skip running any syscall at all.
+@@ -1043,6 +1047,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+
+ secure_computing(regs->gpr[0]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+@@ -1076,6 +1085,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
+ {
+ int step;
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(current->audit_context))
+ audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
+ regs->result);
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index d670429..2bc59b2 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
+ /* Save user registers on the stack */
+ frame = &rt_sf->uc.uc_mcontext;
+ addr = frame;
+- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ if (save_user_regs(regs, frame, 0, 1))
+ goto badframe;
+ regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index 2fe6fc6..ada0d96 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
+ current->thread.fpscr.val = 0;
+
+ /* Set up to return from userspace. */
+- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
+ regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
+ } else {
+ err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
+diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
+index b97c2d6..dd01a6a 100644
+--- a/arch/powerpc/kernel/sys_ppc32.c
++++ b/arch/powerpc/kernel/sys_ppc32.c
+@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
+ if (oldlenp) {
+ if (!error) {
+ if (get_user(oldlen, oldlenp) ||
+- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
++ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
++ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
+ error = -EFAULT;
+ }
+- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+ }
+ return error;
+ }
+diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
+index 3370e62..527c659 100644
+--- a/arch/powerpc/kernel/syscalls.c
++++ b/arch/powerpc/kernel/syscalls.c
+@@ -201,11 +201,11 @@ long ppc64_personality(unsigned long personality)
+ long ret;
+
+ if (personality(current->personality) == PER_LINUX32
+- && personality == PER_LINUX)
+- personality = PER_LINUX32;
++ && personality(personality) == PER_LINUX)
++ personality = (personality & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(personality);
+- if (ret == PER_LINUX32)
+- ret = PER_LINUX;
++ if (personality(ret) == PER_LINUX32)
++ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+ }
+ #endif
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 6f0ae1a..e4b6a56 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
+ static inline void pmac_backlight_unblank(void) { }
+ #endif
+
++extern void gr_handle_kernel_exploit(void);
++
+ int die(const char *str, struct pt_regs *regs, long err)
+ {
+ static struct {
+@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
+ if (panic_on_oops)
+ panic("Fatal exception");
+
++ gr_handle_kernel_exploit();
++
+ oops_exit();
+ do_exit(err);
+
+diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
+index 137dc22..fe57a79 100644
+--- a/arch/powerpc/kernel/vdso.c
++++ b/arch/powerpc/kernel/vdso.c
+@@ -36,6 +36,7 @@
+ #include <asm/firmware.h>
+ #include <asm/vdso.h>
+ #include <asm/vdso_datapage.h>
++#include <asm/mman.h>
+
+ #include "setup.h"
+
+@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = VDSO32_MBASE;
+ #endif
+
+- current->mm->context.vdso_base = 0;
++ current->mm->context.vdso_base = ~0UL;
+
+ /* vDSO has a problem and was disabled, just don't "enable" it for the
+ * process
+@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+ vdso_base = get_unmapped_area(NULL, vdso_base,
+ (vdso_pages << PAGE_SHIFT) +
+ ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
+- 0, 0);
++ 0, MAP_PRIVATE | MAP_EXECUTABLE);
+ if (IS_ERR_VALUE(vdso_base)) {
+ rc = vdso_base;
+ goto fail_mmapsem;
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index 77f6421..829564a 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
+ vio_cmo_dealloc(viodev, alloc_size);
+ }
+
+-struct dma_map_ops vio_dma_mapping_ops = {
++static const struct dma_map_ops vio_dma_mapping_ops = {
+ .alloc_coherent = vio_dma_iommu_alloc_coherent,
+ .free_coherent = vio_dma_iommu_free_coherent,
+ .map_sg = vio_dma_iommu_map_sg,
+ .unmap_sg = vio_dma_iommu_unmap_sg,
++ .dma_supported = dma_iommu_dma_supported,
+ .map_page = vio_dma_iommu_map_page,
+ .unmap_page = vio_dma_iommu_unmap_page,
+
+@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
+
+ static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
+ {
+- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
+ viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+ }
+
+diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
+index 5eea6f3..5d10396 100644
+--- a/arch/powerpc/lib/usercopy_64.c
++++ b/arch/powerpc/lib/usercopy_64.c
+@@ -9,22 +9,6 @@
+ #include <linux/module.h>
+ #include <asm/uaccess.h>
+
+-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_READ, from, n)))
+- n = __copy_from_user(to, from, n);
+- else
+- memset(to, 0, n);
+- return n;
+-}
+-
+-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+- if (likely(access_ok(VERIFY_WRITE, to, n)))
+- n = __copy_to_user(to, from, n);
+- return n;
+-}
+-
+ unsigned long copy_in_user(void __user *to, const void __user *from,
+ unsigned long n)
+ {
+@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
+ return n;
+ }
+
+-EXPORT_SYMBOL(copy_from_user);
+-EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(copy_in_user);
+
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index e7dae82..877ce0d 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -30,6 +30,10 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/perf_event.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/unistd.h>
+
+ #include <asm/firmware.h>
+ #include <asm/page.h>
+@@ -40,6 +44,7 @@
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/siginfo.h>
++#include <asm/ptrace.h>
+
+
+ #ifdef CONFIG_KPROBES
+@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int __user *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & DSISR_ISSTORE;
+ #else
+@@ -250,7 +282,7 @@ good_area:
+ * "undefined". Of those that can be set, this is the only
+ * one which seems bad.
+ */
+- if (error_code & 0x10000000)
++ if (error_code & DSISR_GUARDED)
+ /* Guarded storage error. */
+ goto bad_area;
+ #endif /* CONFIG_8xx */
+@@ -265,7 +297,7 @@ good_area:
+ * processors use the same I/D cache coherency mechanism
+ * as embedded.
+ */
+- if (error_code & DSISR_PROTFAULT)
++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
+ goto bad_area;
+ #endif /* CONFIG_PPC_STD_MMU */
+
+@@ -335,6 +367,23 @@ bad_area:
+ bad_area_nosemaphore:
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
++#ifdef CONFIG_PPC_STD_MMU
++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
++#else
++ if (is_exec && regs->nip == address) {
++#endif
++ switch (pax_handle_fetch_fault(regs)) {
++ }
++
++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
++ do_group_exit(SIGKILL);
++ }
++ }
++#endif
++
+ _exception(SIGSEGV, regs, code, address);
+ return 0;
+ }
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 5973631..ad617af 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
+ {
+ unsigned long lmb_next_region_start_pfn,
+ lmb_region_max_pfn;
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < lmb.memory.cnt - 1; i++) {
+ lmb_region_max_pfn =
+diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
+index 0d957a4..eae383e 100644
+--- a/arch/powerpc/mm/mmap_64.c
++++ b/arch/powerpc/mm/mmap_64.c
+@@ -65,6 +65,10 @@ static unsigned long mmap_rnd(void)
+ {
+ unsigned long rnd = 0;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ /* 8MB for 32bit, 1GB for 64bit */
+ if (is_32bit_task())
+@@ -99,10 +103,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
+index ba51948..9190915 100644
+--- a/arch/powerpc/mm/slice.c
++++ b/arch/powerpc/mm/slice.c
+@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+ if ((mm->task_size - len) < addr)
+ return 0;
+ vma = find_vma(mm, addr);
+- return (!vma || (addr + len) <= vma->vm_start);
++ return check_heap_stack_gap(vma, &addr, len, 0);
+ }
+
+ static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+@@ -256,7 +256,7 @@ full_search:
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+- if (!vma || addr + len <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, 0)) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ }
+ }
+
+- addr = mm->mmap_base;
+- while (addr > len) {
++ if (mm->mmap_base < len)
++ addr = -ENOMEM;
++ else
++ addr = mm->mmap_base - len;
++
++ while (!IS_ERR_VALUE(addr)) {
+ /* Go down by chunk size */
+- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
++ addr = _ALIGN_DOWN(addr, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (!vma || (addr + len) <= vma->vm_start) {
++ if (check_heap_stack_gap(vma, &addr, len, 0)) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start;
++ addr = skip_heap_stack_gap(vma, len, 0);
+ }
+
+ /*
+@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ if (fixed && addr > (mm->task_size - len))
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
++ addr = 0;
++#endif
++
+ /* If hint, make sure it matches our alignment restrictions */
+ if (!fixed && addr) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
+index b5c753d..8f01abe 100644
+--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
++++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
+@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
+ lite5200_pm_target_state = PM_SUSPEND_ON;
+ }
+
+-static struct platform_suspend_ops lite5200_pm_ops = {
++static const struct platform_suspend_ops lite5200_pm_ops = {
+ .valid = lite5200_pm_valid,
+ .begin = lite5200_pm_begin,
+ .prepare = lite5200_pm_prepare,
+diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+index a55b0b6..478c18e 100644
+--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
++++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
+ iounmap(mbar);
+ }
+
+-static struct platform_suspend_ops mpc52xx_pm_ops = {
++static const struct platform_suspend_ops mpc52xx_pm_ops = {
+ .valid = mpc52xx_pm_valid,
+ .prepare = mpc52xx_pm_prepare,
+ .enter = mpc52xx_pm_enter,
+diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
+index 08e65fc..643d3ac 100644
+--- a/arch/powerpc/platforms/83xx/suspend.c
++++ b/arch/powerpc/platforms/83xx/suspend.c
+@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
+ return ret;
+ }
+
+-static struct platform_suspend_ops mpc83xx_suspend_ops = {
++static const struct platform_suspend_ops mpc83xx_suspend_ops = {
+ .valid = mpc83xx_suspend_valid,
+ .begin = mpc83xx_suspend_begin,
+ .enter = mpc83xx_suspend_enter,
+diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
+index ca5bfdf..1602e09 100644
+--- a/arch/powerpc/platforms/cell/iommu.c
++++ b/arch/powerpc/platforms/cell/iommu.c
+@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
+
+ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+
+-struct dma_map_ops dma_iommu_fixed_ops = {
++const struct dma_map_ops dma_iommu_fixed_ops = {
+ .alloc_coherent = dma_fixed_alloc_coherent,
+ .free_coherent = dma_fixed_free_coherent,
+ .map_sg = dma_fixed_map_sg,
+diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
+index e34b305..20e48ec 100644
+--- a/arch/powerpc/platforms/ps3/system-bus.c
++++ b/arch/powerpc/platforms/ps3/system-bus.c
+@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
+ return mask >= DMA_BIT_MASK(32);
+ }
+
+-static struct dma_map_ops ps3_sb_dma_ops = {
++static const struct dma_map_ops ps3_sb_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_sb_map_sg,
+@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
+ .unmap_page = ps3_unmap_page,
+ };
+
+-static struct dma_map_ops ps3_ioc0_dma_ops = {
++static const struct dma_map_ops ps3_ioc0_dma_ops = {
+ .alloc_coherent = ps3_alloc_coherent,
+ .free_coherent = ps3_free_coherent,
+ .map_sg = ps3_ioc0_map_sg,
+diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
+index f0e6f28..60d53ed 100644
+--- a/arch/powerpc/platforms/pseries/Kconfig
++++ b/arch/powerpc/platforms/pseries/Kconfig
+@@ -2,6 +2,8 @@ config PPC_PSERIES
+ depends on PPC64 && PPC_BOOK3S
+ bool "IBM pSeries & new (POWER5-based) iSeries"
+ select MPIC
++ select PCI_MSI
++ select XICS
+ select PPC_I8259
+ select PPC_RTAS
+ select RTAS_ERROR_LOGGING
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index aca7fff..76c2b6b 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -197,28 +197,26 @@ config AUDIT_ARCH
+
+ config S390_SWITCH_AMODE
+ bool "Switch kernel/user addressing modes"
++ default y
+ help
+ This option allows to switch the addressing modes of kernel and user
+- space. The kernel parameter switch_amode=on will enable this feature,
+- default is disabled. Enabling this (via kernel parameter) on machines
+- earlier than IBM System z9-109 EC/BC will reduce system performance.
++ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
++ will reduce system performance.
+
+ Note that this option will also be selected by selecting the execute
+- protection option below. Enabling the execute protection via the
+- noexec kernel parameter will also switch the addressing modes,
+- independent of the switch_amode kernel parameter.
++ protection option below. Enabling the execute protection will also
++ switch the addressing modes, independent of this option.
+
+
+ config S390_EXEC_PROTECT
+ bool "Data execute protection"
++ default y
+ select S390_SWITCH_AMODE
+ help
+ This option allows to enable a buffer overflow protection for user
+ space programs and it also selects the addressing mode option above.
+- The kernel parameter noexec=on will enable this feature and also
+- switch the addressing modes, default is disabled. Enabling this (via
+- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
+- will reduce system performance.
++ Enabling this on machines earlier than IBM System z9-109 EC/BC will
++ reduce system performance.
+
+ comment "Code generation options"
+
+diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
+index ae7c8f9..3f01a0c 100644
+--- a/arch/s390/include/asm/atomic.h
++++ b/arch/s390/include/asm/atomic.h
+@@ -362,6 +362,16 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+ #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
++#define atomic64_read_unchecked(v) atomic64_read(v)
++#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
++#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
++#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
++#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
++#define atomic64_inc_unchecked(v) atomic64_inc(v)
++#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
++#define atomic64_dec_unchecked(v) atomic64_dec(v)
++#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
++
+ #define smp_mb__before_atomic_dec() smp_mb()
+ #define smp_mb__after_atomic_dec() smp_mb()
+ #define smp_mb__before_atomic_inc() smp_mb()
+diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
+index 9b86681..c5140db 100644
+--- a/arch/s390/include/asm/cache.h
++++ b/arch/s390/include/asm/cache.h
+@@ -11,8 +11,10 @@
+ #ifndef __ARCH_S390_CACHE_H
+ #define __ARCH_S390_CACHE_H
+
+-#define L1_CACHE_BYTES 256
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 8
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index e885442..5b0c9aa3 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
+ that it will "exec", and that there is sufficient room for the brk. */
+ #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. */
+
+@@ -182,7 +189,8 @@ extern char elf_platform[];
+ #define ELF_PLATFORM (elf_platform)
+
+ #ifndef __s390x__
+-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
++#define SET_PERSONALITY(ex) \
++ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+ #else /* __s390x__ */
+ #define SET_PERSONALITY(ex) \
+ do { \
+diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
+index e37478e..9ce0e9f 100644
+--- a/arch/s390/include/asm/setup.h
++++ b/arch/s390/include/asm/setup.h
+@@ -50,13 +50,13 @@ extern unsigned long memory_end;
+ void detect_memory_layout(struct mem_chunk chunk[]);
+
+ #ifdef CONFIG_S390_SWITCH_AMODE
+-extern unsigned int switch_amode;
++#define switch_amode (1)
+ #else
+ #define switch_amode (0)
+ #endif
+
+ #ifdef CONFIG_S390_EXEC_PROTECT
+-extern unsigned int s390_noexec;
++#define s390_noexec (1)
+ #else
+ #define s390_noexec (0)
+ #endif
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index 8377e91..e28e6f1 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -232,6 +232,10 @@ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
+ static inline unsigned long __must_check
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n) && (n <= 256))
+ return uaccess.copy_from_user_small(n, from, to);
+ else
+@@ -283,6 +290,10 @@ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else
+diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
+index 639380a..72e3c02 100644
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+
+ /* Increase core size by size of got & plt and set start
+ offsets for got and plt. */
+- me->core_size = ALIGN(me->core_size, 4);
+- me->arch.got_offset = me->core_size;
+- me->core_size += me->arch.got_size;
+- me->arch.plt_offset = me->core_size;
+- me->core_size += me->arch.plt_size;
++ me->core_size_rw = ALIGN(me->core_size_rw, 4);
++ me->arch.got_offset = me->core_size_rw;
++ me->core_size_rw += me->arch.got_size;
++ me->arch.plt_offset = me->core_size_rx;
++ me->core_size_rx += me->arch.plt_size;
+ return 0;
+ }
+
+@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ if (info->got_initialized == 0) {
+ Elf_Addr *gotent;
+
+- gotent = me->module_core + me->arch.got_offset +
++ gotent = me->module_core_rw + me->arch.got_offset +
+ info->got_offset;
+ *gotent = val;
+ info->got_initialized = 1;
+@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ else if (r_type == R_390_GOTENT ||
+ r_type == R_390_GOTPLTENT)
+ *(unsigned int *) loc =
+- (val + (Elf_Addr) me->module_core - loc) >> 1;
++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
+ else if (r_type == R_390_GOT64 ||
+ r_type == R_390_GOTPLT64)
+ *(unsigned long *) loc = val;
+@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
+ if (info->plt_initialized == 0) {
+ unsigned int *ip;
+- ip = me->module_core + me->arch.plt_offset +
++ ip = me->module_core_rx + me->arch.plt_offset +
+ info->plt_offset;
+ #ifndef CONFIG_64BIT
+ ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
+@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ val - loc + 0xffffUL < 0x1ffffeUL) ||
+ (r_type == R_390_PLT32DBL &&
+ val - loc + 0xffffffffULL < 0x1fffffffeULL)))
+- val = (Elf_Addr) me->module_core +
++ val = (Elf_Addr) me->module_core_rx +
+ me->arch.plt_offset +
+ info->plt_offset;
+ val += rela->r_addend - loc;
+@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ case R_390_GOTOFF32: /* 32 bit offset to GOT. */
+ case R_390_GOTOFF64: /* 64 bit offset to GOT. */
+ val = val + rela->r_addend -
+- ((Elf_Addr) me->module_core + me->arch.got_offset);
++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
+ if (r_type == R_390_GOTOFF16)
+ *(unsigned short *) loc = val;
+ else if (r_type == R_390_GOTOFF32)
+@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
+ break;
+ case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
+ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
+- val = (Elf_Addr) me->module_core + me->arch.got_offset +
++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
+ rela->r_addend - loc;
+ if (r_type == R_390_GOTPC)
+ *(unsigned int *) loc = val;
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 358e545..051e4f4 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -307,9 +307,6 @@ static int __init early_parse_mem(char *p)
+ early_param("mem", early_parse_mem);
+
+ #ifdef CONFIG_S390_SWITCH_AMODE
+-unsigned int switch_amode = 0;
+-EXPORT_SYMBOL_GPL(switch_amode);
+-
+ static int set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
+ {
+@@ -335,17 +332,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
+ return 0;
+ }
+ }
+-
+-/*
+- * Switch kernel/user addressing modes?
+- */
+-static int __init early_parse_switch_amode(char *p)
+-{
+- switch_amode = 1;
+- return 0;
+-}
+-early_param("switch_amode", early_parse_switch_amode);
+-
+ #else /* CONFIG_S390_SWITCH_AMODE */
+ static inline int set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
+@@ -354,24 +340,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
+ }
+ #endif /* CONFIG_S390_SWITCH_AMODE */
+
+-#ifdef CONFIG_S390_EXEC_PROTECT
+-unsigned int s390_noexec = 0;
+-EXPORT_SYMBOL_GPL(s390_noexec);
+-
+-/*
+- * Enable execute protection?
+- */
+-static int __init early_parse_noexec(char *p)
+-{
+- if (!strncmp(p, "off", 3))
+- return 0;
+- switch_amode = 1;
+- s390_noexec = 1;
+- return 0;
+-}
+-early_param("noexec", early_parse_noexec);
+-#endif /* CONFIG_S390_EXEC_PROTECT */
+-
+ static void setup_addressing_mode(void)
+ {
+ if (s390_noexec) {
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index 0ab74ae..c8b68f9 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ */
+ if (mmap_is_legacy()) {
+ mm->mmap_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+ mm->mmap_base = mmap_base();
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = s390_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
+index ae3d59f..f65f075 100644
+--- a/arch/score/include/asm/cache.h
++++ b/arch/score/include/asm/cache.h
+@@ -1,7 +1,9 @@
+ #ifndef _ASM_SCORE_CACHE_H
+ #define _ASM_SCORE_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 4
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif /* _ASM_SCORE_CACHE_H */
+diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
+index 589d5c7..669e274 100644
+--- a/arch/score/include/asm/system.h
++++ b/arch/score/include/asm/system.h
+@@ -17,7 +17,7 @@ do { \
+ #define finish_arch_switch(prev) do {} while (0)
+
+ typedef void (*vi_handler_t)(void);
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) (x)
+
+ #define mb() barrier()
+ #define rmb() barrier()
+diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
+index 25d0803..d6c8e36 100644
+--- a/arch/score/kernel/process.c
++++ b/arch/score/kernel/process.c
+@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
+
+ return task_pt_regs(task)->cp0_epc;
+ }
+-
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- return sp;
+-}
+diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
+index d936c1a..304a252 100644
+--- a/arch/sh/boards/mach-hp6xx/pm.c
++++ b/arch/sh/boards/mach-hp6xx/pm.c
+@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
+ return 0;
+ }
+
+-static struct platform_suspend_ops hp6x0_pm_ops = {
++static const struct platform_suspend_ops hp6x0_pm_ops = {
+ .enter = hp6x0_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
+index 02df18e..ae3a793 100644
+--- a/arch/sh/include/asm/cache.h
++++ b/arch/sh/include/asm/cache.h
+@@ -9,10 +9,11 @@
+ #define __ASM_SH_CACHE_H
+ #ifdef __KERNEL__
+
++#include <linux/const.h>
+ #include <linux/init.h>
+ #include <cpu/cache.h>
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
+
+diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
+index 8a8a993..7b3079b 100644
+--- a/arch/sh/kernel/cpu/sh4/sq.c
++++ b/arch/sh/kernel/cpu/sh4/sq.c
+@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
+ NULL,
+ };
+
+-static struct sysfs_ops sq_sysfs_ops = {
++static const struct sysfs_ops sq_sysfs_ops = {
+ .show = sq_sysfs_show,
+ .store = sq_sysfs_store,
+ };
+diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
+index ee3c2aa..c49cee6 100644
+--- a/arch/sh/kernel/cpu/shmobile/pm.c
++++ b/arch/sh/kernel/cpu/shmobile/pm.c
+@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
+ return 0;
+ }
+
+-static struct platform_suspend_ops sh_pm_ops = {
++static const struct platform_suspend_ops sh_pm_ops = {
+ .enter = sh_pm_enter,
+ .valid = suspend_valid_only_mem,
+ };
+diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
+index 3e532d0..9faa306 100644
+--- a/arch/sh/kernel/kgdb.c
++++ b/arch/sh/kernel/kgdb.c
+@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
+ {
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trapa #0x3c */
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ .gdb_bpt_instr = { 0x3c, 0xc3 },
+diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
+index afeb710..8da5c79 100644
+--- a/arch/sh/mm/mmap.c
++++ b/arch/sh/mm/mmap.c
+@@ -49,6 +49,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -74,8 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -106,7 +106,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -131,6 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ int do_colour_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -157,8 +158,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (TASK_SIZE - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -178,28 +178,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_colour_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -209,10 +210,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_colour_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 05ef538..dc9c857 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -32,6 +32,7 @@ config SPARC
+
+ config SPARC32
+ def_bool !64BIT
++ select GENERIC_ATOMIC64
+
+ config SPARC64
+ def_bool 64BIT
+diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
+index 0538555..c826059 100644
+--- a/arch/sparc/Makefile
++++ b/arch/sparc/Makefile
+@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
+ # Export what is needed by arch/sparc/boot/Makefile
+ export VMLINUX_INIT VMLINUX_MAIN
+ VMLINUX_INIT := $(head-y) $(init-y)
+-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
+ VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
+ VMLINUX_MAIN += $(drivers-y) $(net-y)
+
+diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
+index f0d343c..cf36e68 100644
+--- a/arch/sparc/include/asm/atomic_32.h
++++ b/arch/sparc/include/asm/atomic_32.h
+@@ -13,6 +13,8 @@
+
+ #include <linux/types.h>
+
++#include <asm-generic/atomic64.h>
++
+ #ifdef __KERNEL__
+
+ #include <asm/system.h>
+diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
+index f5cc06f..f858d47 100644
+--- a/arch/sparc/include/asm/atomic_64.h
++++ b/arch/sparc/include/asm/atomic_64.h
+@@ -14,18 +14,40 @@
+ #define ATOMIC64_INIT(i) { (i) }
+
+ #define atomic_read(v) ((v)->counter)
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
+ #define atomic64_read(v) ((v)->counter)
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
+
+ #define atomic_set(v, i) (((v)->counter) = i)
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
+ #define atomic64_set(v, i) (((v)->counter) = i)
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ extern void atomic_add(int, atomic_t *);
++extern void atomic_add_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_add(long, atomic64_t *);
++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
+ extern void atomic_sub(int, atomic_t *);
++extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
+ extern void atomic64_sub(long, atomic64_t *);
++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
+
+ extern int atomic_add_ret(int, atomic_t *);
++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
+ extern long atomic64_add_ret(long, atomic64_t *);
++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
+ extern int atomic_sub_ret(int, atomic_t *);
+ extern long atomic64_sub_ret(long, atomic64_t *);
+
+@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+
+ #define atomic_inc_return(v) atomic_add_ret(1, v)
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(1, v);
++}
+ #define atomic64_inc_return(v) atomic64_add_ret(1, v)
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(1, v);
++}
+
+ #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
+ #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+
+ #define atomic_add_return(i, v) atomic_add_ret(i, v)
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ return atomic_add_ret_unchecked(i, v);
++}
+ #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ return atomic64_add_ret_unchecked(i, v);
++}
+
+ /*
+ * atomic_inc_and_test - increment and test
+@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_inc_return_unchecked(v) == 0;
++}
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+ #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
+@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
+ #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+
+ #define atomic_inc(v) atomic_add(1, v)
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ atomic_add_unchecked(1, v);
++}
+ #define atomic64_inc(v) atomic64_add(1, v)
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_add_unchecked(1, v);
++}
+
+ #define atomic_dec(v) atomic_sub(1, v)
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ atomic_sub_unchecked(1, v);
++}
+ #define atomic64_dec(v) atomic64_sub(1, v)
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ atomic64_sub_unchecked(1, v);
++}
+
+ #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
+ #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+
+ #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
+ #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%icc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ #define atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+ #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
+
+ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addcc %2, %0, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "tvs %%xcc, 6\n"
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a)
++ : "cc");
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
+index 41f85ae..73b80b5 100644
+--- a/arch/sparc/include/asm/cache.h
++++ b/arch/sparc/include/asm/cache.h
+@@ -7,8 +7,10 @@
+ #ifndef _SPARC_CACHE_H
+ #define _SPARC_CACHE_H
+
++#include <linux/const.h>
++
+ #define L1_CACHE_SHIFT 5
+-#define L1_CACHE_BYTES 32
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+ #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
+
+ #ifdef CONFIG_SPARC32
+diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
+index 5a8c308..38def92 100644
+--- a/arch/sparc/include/asm/dma-mapping.h
++++ b/arch/sparc/include/asm/dma-mapping.h
+@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
+ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+ #define dma_is_consistent(d, h) (1)
+
+-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
++extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
+ extern struct bus_type pci_bus_type;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
+ if (dev->bus == &pci_bus_type)
+@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free_coherent(dev, size, cpu_addr, dma_handle);
+diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
+index 381a1b5..b97e3ff 100644
+--- a/arch/sparc/include/asm/elf_32.h
++++ b/arch/sparc/include/asm/elf_32.h
+@@ -116,6 +116,13 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE 0x10000UL
++
++#define PAX_DELTA_MMAP_LEN 16
++#define PAX_DELTA_STACK_LEN 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
+index 9968085..c2106ef 100644
+--- a/arch/sparc/include/asm/elf_64.h
++++ b/arch/sparc/include/asm/elf_64.h
+@@ -163,6 +163,12 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
+index 156707b..aefa786 100644
+--- a/arch/sparc/include/asm/page_32.h
++++ b/arch/sparc/include/asm/page_32.h
+@@ -8,6 +8,8 @@
+ #ifndef _SPARC_PAGE_H
+ #define _SPARC_PAGE_H
+
++#include <linux/const.h>
++
+ #define PAGE_SHIFT 12
+
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
+index ca2b344..c6084f89 100644
+--- a/arch/sparc/include/asm/pgalloc_32.h
++++ b/arch/sparc/include/asm/pgalloc_32.h
+@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
+ BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
+ #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
+ #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
++#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
+
+ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
+ #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
+diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
+index 5bdfa2c..92b0706 100644
+--- a/arch/sparc/include/asm/pgalloc_64.h
++++ b/arch/sparc/include/asm/pgalloc_64.h
+@@ -25,6 +25,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+ }
+
+ #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
++#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
+
+ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
+index e0cabe7..efd60f1 100644
+--- a/arch/sparc/include/asm/pgtable_32.h
++++ b/arch/sparc/include/asm/pgtable_32.h
+@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++extern pgprot_t PAGE_SHARED_NOEXEC;
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
+index 1407c07..7e10231 100644
+--- a/arch/sparc/include/asm/pgtsrmmu.h
++++ b/arch/sparc/include/asm/pgtsrmmu.h
+@@ -115,6 +115,13 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
+index 43e5147..47622a1 100644
+--- a/arch/sparc/include/asm/spinlock_64.h
++++ b/arch/sparc/include/asm/spinlock_64.h
+@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
+
+ /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+-static void inline arch_read_lock(raw_rwlock_t *lock)
++static inline void arch_read_lock(raw_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__ (
+ "1: ldsw [%2], %0\n"
+ " brlz,pn %0, 2f\n"
+-"4: add %0, 1, %1\n"
++"4: addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
+ " .previous"
+ : "=&r" (tmp1), "=&r" (tmp2)
+ : "r" (lock)
+- : "memory");
++ : "memory", "cc");
+ }
+
+-static int inline arch_read_trylock(raw_rwlock_t *lock)
++static inline int arch_read_trylock(raw_rwlock_t *lock)
+ {
+ int tmp1, tmp2;
+
+@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
+ "1: ldsw [%2], %0\n"
+ " brlz,a,pn %0, 2f\n"
+ " mov 0, %0\n"
+-" add %0, 1, %1\n"
++" addcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%icc, 1b\n"
+@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
+ return tmp1;
+ }
+
+-static void inline arch_read_unlock(raw_rwlock_t *lock)
++static inline void arch_read_unlock(raw_rwlock_t *lock)
+ {
+ unsigned long tmp1, tmp2;
+
+ __asm__ __volatile__(
+ "1: lduw [%2], %0\n"
+-" sub %0, 1, %1\n"
++" subcc %0, 1, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++" tvs %%icc, 6\n"
++#endif
++
+ " cas [%2], %0, %1\n"
+ " cmp %0, %1\n"
+ " bne,pn %%xcc, 1b\n"
+@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_lock(raw_rwlock_t *lock)
++static inline void arch_write_lock(raw_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2;
+
+@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
+ : "memory");
+ }
+
+-static void inline arch_write_unlock(raw_rwlock_t *lock)
++static inline void arch_write_unlock(raw_rwlock_t *lock)
+ {
+ __asm__ __volatile__(
+ " stw %%g0, [%0]"
+@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
+ : "memory");
+ }
+
+-static int inline arch_write_trylock(raw_rwlock_t *lock)
++static inline int arch_write_trylock(raw_rwlock_t *lock)
+ {
+ unsigned long mask, tmp1, tmp2, result;
+
+diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
+index 844d73a..f787fb9 100644
+--- a/arch/sparc/include/asm/thread_info_32.h
++++ b/arch/sparc/include/asm/thread_info_32.h
+@@ -50,6 +50,8 @@ struct thread_info {
+ unsigned long w_saved;
+
+ struct restart_block restart_block;
++
++ unsigned long lowest_stack;
+ };
+
+ /*
+diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
+index f78ad9a..a3213ed 100644
+--- a/arch/sparc/include/asm/thread_info_64.h
++++ b/arch/sparc/include/asm/thread_info_64.h
+@@ -68,6 +68,8 @@ struct thread_info {
+ struct pt_regs *kern_una_regs;
+ unsigned int kern_una_insn;
+
++ unsigned long lowest_stack;
++
+ unsigned long fpregs[0] __attribute__ ((aligned(64)));
+ };
+
+@@ -227,6 +229,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ /* flag bit 8 is available */
+ #define TIF_SECCOMP 9 /* secure computing */
+ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
++#define TIF_GRSEC_SETXID 11 /* update credentials on syscall entry/exit */
++
+ /* NOTE: Thread flags >= 12 should be ones we have no interest
+ * in using in assembly, else we can't use the mask as
+ * an immediate value in instructions such as andcc.
+@@ -247,12 +251,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
+ #define _TIF_FREEZE (1<<TIF_FREEZE)
++#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
+
+ #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
+ _TIF_DO_NOTIFY_RESUME_MASK | \
+ _TIF_NEED_RESCHED | _TIF_PERFCTR)
+ #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
+
++#define _TIF_WORK_SYSCALL \
++ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
++ _TIF_GRSEC_SETXID)
++
++
+ /*
+ * Thread-synchronous status.
+ *
+diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
+index e88fbe5..bd0eda7 100644
+--- a/arch/sparc/include/asm/uaccess.h
++++ b/arch/sparc/include/asm/uaccess.h
+@@ -1,5 +1,6 @@
+ #ifndef ___ASM_SPARC_UACCESS_H
+ #define ___ASM_SPARC_UACCESS_H
++
+ #if defined(__sparc__) && defined(__arch64__)
+ #include <asm/uaccess_64.h>
+ #else
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 8303ac4..07f333d 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
+
+ static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) to, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) to, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
+ return __copy_user(to, (__force void __user *) from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
++ if (!__builtin_constant_p(n))
++ check_object_size(from, n, true);
++
+ return __copy_user(to, (__force void __user *) from, n);
+ }
+
+ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+- if (n && __access_ok((unsigned long) from, n))
++ if ((long)n < 0)
++ return n;
++
++ if (n && __access_ok((unsigned long) from, n)) {
++ if (!__builtin_constant_p(n))
++ check_object_size(to, n, false);
+ return __copy_user((__force void __user *) to, from, n);
+- else
++ } else
+ return n;
+ }
+
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ return __copy_user((__force void __user *) to, from, n);
+ }
+
+diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
+index 9ea271e..7b8a271 100644
+--- a/arch/sparc/include/asm/uaccess_64.h
++++ b/arch/sparc/include/asm/uaccess_64.h
+@@ -9,6 +9,7 @@
+ #include <linux/compiler.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
++#include <linux/kernel.h>
+ #include <asm/asi.h>
+ #include <asm/system.h>
+ #include <asm/spitfire.h>
+@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_from_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(to, size, false);
++
++ ret = ___copy_from_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_from_user_fixup(to, from, size);
+ return ret;
+@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
+ static inline unsigned long __must_check
+ copy_to_user(void __user *to, const void *from, unsigned long size)
+ {
+- unsigned long ret = ___copy_to_user(to, from, size);
++ unsigned long ret;
+
++ if ((long)size < 0 || size > INT_MAX)
++ return size;
++
++ if (!__builtin_constant_p(size))
++ check_object_size(from, size, true);
++
++ ret = ___copy_to_user(to, from, size);
+ if (unlikely(ret))
+ ret = copy_to_user_fixup(to, from, size);
+ return ret;
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index 2782681..77ded84 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -3,7 +3,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ extra-y := head_$(BITS).o
+ extra-y += init_task.o
+diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
+index 6a831bd..b37a849 100644
+--- a/arch/sparc/kernel/ds.c
++++ b/arch/sparc/kernel/ds.c
+@@ -780,6 +780,16 @@ void ldom_set_var(const char *var, const char *value)
+ char *base, *p;
+ int msg_len, loops;
+
++ if (strlen(var) + strlen(value) + 2 >
++ sizeof(pkt) - sizeof(pkt.header)) {
++ printk(KERN_ERR PFX
++ "contents length: %zu, which more than max: %lu,"
++ "so could not set (%s) variable to (%s).\n",
++ strlen(var) + strlen(value) + 2,
++ sizeof(pkt) - sizeof(pkt.header), var, value);
++ return;
++ }
++
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.header.data.tag.type = DS_DATA;
+ pkt.header.data.handle = cp->handle;
+diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
+index 7690cc2..ece64c9 100644
+--- a/arch/sparc/kernel/iommu.c
++++ b/arch/sparc/kernel/iommu.c
+@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4u_dma_ops = {
++static const struct dma_map_ops sun4u_dma_ops = {
+ .alloc_coherent = dma_4u_alloc_coherent,
+ .free_coherent = dma_4u_free_coherent,
+ .map_page = dma_4u_map_page,
+@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
+ .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ };
+
+-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
++const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
+index 9f61fd8..bd048db 100644
+--- a/arch/sparc/kernel/ioport.c
++++ b/arch/sparc/kernel/ioport.c
+@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ BUG();
+ }
+
+-struct dma_map_ops sbus_dma_ops = {
++const struct dma_map_ops sbus_dma_ops = {
+ .alloc_coherent = sbus_alloc_coherent,
+ .free_coherent = sbus_free_coherent,
+ .map_page = sbus_map_page,
+@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
+ .sync_sg_for_device = sbus_sync_sg_for_device,
+ };
+
+-struct dma_map_ops *dma_ops = &sbus_dma_ops;
++const struct dma_map_ops *dma_ops = &sbus_dma_ops;
+ EXPORT_SYMBOL(dma_ops);
+
+ static int __init sparc_register_ioport(void)
+@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
+ }
+ }
+
+-struct dma_map_ops pci32_dma_ops = {
++const struct dma_map_ops pci32_dma_ops = {
+ .alloc_coherent = pci32_alloc_coherent,
+ .free_coherent = pci32_free_coherent,
+ .map_page = pci32_map_page,
+diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
+index 04df4ed..55c4b6e 100644
+--- a/arch/sparc/kernel/kgdb_32.c
++++ b/arch/sparc/kernel/kgdb_32.c
+@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
+ {
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x7d */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
+ };
+diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
+index f5a0fd4..d886f71 100644
+--- a/arch/sparc/kernel/kgdb_64.c
++++ b/arch/sparc/kernel/kgdb_64.c
+@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
+ {
+ }
+
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: ta 0x72 */
+ .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
+ };
+diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
+index 23c33ff..d137fbd 100644
+--- a/arch/sparc/kernel/pci_sun4v.c
++++ b/arch/sparc/kernel/pci_sun4v.c
+@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+-static struct dma_map_ops sun4v_dma_ops = {
++static const struct dma_map_ops sun4v_dma_ops = {
+ .alloc_coherent = dma_4v_alloc_coherent,
+ .free_coherent = dma_4v_free_coherent,
+ .map_page = dma_4v_map_page,
+diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
+index c49865b..b41a81b 100644
+--- a/arch/sparc/kernel/process_32.c
++++ b/arch/sparc/kernel/process_32.c
+@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
+ rw->ins[4], rw->ins[5],
+ rw->ins[6],
+ rw->ins[7]);
+- printk("%pS\n", (void *) rw->ins[7]);
++ printk("%pA\n", (void *) rw->ins[7]);
+ rw = (struct reg_window32 *) rw->ins[6];
+ }
+ spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
+@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
+
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
+ r->psr, r->pc, r->npc, r->y, print_tainted());
+- printk("PC: <%pS>\n", (void *) r->pc);
++ printk("PC: <%pA>\n", (void *) r->pc);
+ printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
+ r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
+ printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
+ r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
+
+ printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ rw = (struct reg_window32 *) fp;
+ pc = rw->ins[7];
+ printk("[%08lx : ", pc);
+- printk("%pS ] ", (void *) pc);
++ printk("%pA ] ", (void *) pc);
+ fp = rw->ins[6];
+ } while (++count < 16);
+ printk("\n");
+diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
+index cb70476..3d0c191 100644
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
+ if (regs->tstate & TSTATE_PRIV)
+- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
++ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
+ }
+
+ void show_regs(struct pt_regs *regs)
+ {
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y, print_tainted());
+- printk("TPC: <%pS>\n", (void *) regs->tpc);
++ printk("TPC: <%pA>\n", (void *) regs->tpc);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
++ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
+ show_regwindow(regs);
+ }
+
+@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
+ ((tp && tp->task) ? tp->task->pid : -1));
+
+ if (gp->tstate & TSTATE_PRIV) {
+- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
++ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
+ (void *) gp->tpc,
+ (void *) gp->o7,
+ (void *) gp->i7,
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 4ae91dc..c2e705e 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -1049,6 +1049,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+ return ret;
+ }
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++extern void gr_delayed_cred_worker(void);
++#endif
++
+ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ {
+ int ret = 0;
+@@ -1056,6 +1060,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+ /* do the secure computing check first */
+ secure_computing(regs->u_regs[UREG_G1]);
+
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+@@ -1074,6 +1083,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+
+ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ {
++#ifdef CONFIG_GRKERNSEC_SETXID
++ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
++ gr_delayed_cred_worker();
++#endif
++
+ if (unlikely(current->audit_context)) {
+ unsigned long tstate = regs->tstate;
+ int result = AUDITSC_SUCCESS;
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 6edc4e5..06a69b4 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -2,6 +2,7 @@
+ #include <linux/types.h>
+ #include <linux/thread_info.h>
+ #include <linux/uaccess.h>
++#include <linux/errno.h>
+
+ #include <asm/sigcontext.h>
+ #include <asm/fpumacro.h>
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 3a82e65..c066b9b 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -40,6 +40,7 @@ asmlinkage unsigned long sys_getpagesize(void)
+ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+ {
+ struct vm_area_struct * vmm;
++ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+@@ -57,7 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (ARCH_SUN4C && len > 0x20000000)
+ return -ENOMEM;
+ if (!addr)
+- addr = TASK_UNMAPPED_BASE;
++ addr = current->mm->mmap_base;
+
+ if (flags & MAP_SHARED)
+ addr = COLOUR_ALIGN(addr);
+@@ -72,7 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ }
+ if (TASK_SIZE - PAGE_SIZE - len < addr)
+ return -ENOMEM;
+- if (!vmm || addr + len <= vmm->vm_start)
++ if (check_heap_stack_gap(vmm, &addr, len, offset))
+ return addr;
+ addr = vmm->vm_end;
+ if (flags & MAP_SHARED)
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index cfa0e19..fd81c1b 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -120,12 +120,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ unsigned long task_size = TASK_SIZE;
+ unsigned long start_addr;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ if (flags & MAP_FIXED) {
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -140,6 +141,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -147,15 +152,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+ if (len > mm->cached_hole_size) {
+- start_addr = addr = mm->free_area_cache;
++ start_addr = addr = mm->free_area_cache;
+ } else {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ }
+
+@@ -175,14 +179,14 @@ full_search:
+ vma = find_vma(mm, VA_EXCLUDE_END);
+ }
+ if (unlikely(task_size < addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != mm->mmap_base) {
++ start_addr = addr = mm->mmap_base;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -208,6 +212,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ unsigned long task_size = STACK_TOP32;
+ unsigned long addr = addr0;
+ int do_color_align;
++ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
+
+ /* This should only ever run for 32-bit processes. */
+ BUG_ON(!test_thread_flag(TIF_32BIT));
+@@ -216,7 +221,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ /* We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+- if ((flags & MAP_SHARED) &&
++ if ((filp || (flags & MAP_SHARED)) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
+ return -EINVAL;
+ return addr;
+@@ -237,8 +242,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+
+@@ -258,28 +262,29 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = mm->mmap_base-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
++ addr = mm->mmap_base - len;
+
+ do {
++ if (do_color_align)
++ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -289,10 +294,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = vma->vm_start-len;
+- if (do_color_align)
+- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -366,6 +369,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ {
+ unsigned long random_factor = 0UL;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ if (current->flags & PF_RANDOMIZE) {
+ random_factor = get_random_int();
+ if (test_thread_flag(TIF_32BIT))
+@@ -384,6 +391,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
+ sysctl_legacy_va_layout) {
+ mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base += mm->delta_mmap;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area;
+ mm->unmap_area = arch_unmap_area;
+ } else {
+@@ -398,6 +411,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ gap = (task_size / 6 * 5);
+
+ mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (mm->pax_flags & MF_PAX_RANDMMAP)
++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
++#endif
++
+ mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ mm->unmap_area = arch_unmap_area_topdown;
+ }
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index d150c2a..bffda9d 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
+ #endif
+ .align 32
+ 1: ldx [%g6 + TI_FLAGS], %l5
+- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
++ andcc %l5, _TIF_WORK_SYSCALL, %g0
+ be,pt %icc, rtrap
+ nop
+ call syscall_trace_leave
+@@ -198,7 +198,7 @@ linux_sparc_syscall32:
+
+ srl %i5, 0, %o5 ! IEU1
+ srl %i2, 0, %o2 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace32 ! CTI
+ mov %i0, %l5 ! IEU1
+ call %l7 ! CTI Group brk forced
+@@ -221,7 +221,7 @@ linux_sparc_syscall:
+
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
++ andcc %l0, _TIF_WORK_SYSCALL, %g0
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+ 2: call %l7 ! CTI Group brk forced
+@@ -245,7 +245,7 @@ ret_sys_call:
+
+ cmp %o0, -ERESTART_RESTARTBLOCK
+ bgeu,pn %xcc, 1f
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
++ andcc %l0, _TIF_WORK_SYSCALL, %l6
+ 80:
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+@@ -260,7 +260,7 @@ ret_sys_call:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
++ andcc %l0, _TIF_WORK_SYSCALL, %l6
+ sub %g0, %o0, %o0
+ or %g3, %g2, %g3
+ stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
+index c0490c7..84959d1 100644
+--- a/arch/sparc/kernel/traps_32.c
++++ b/arch/sparc/kernel/traps_32.c
+@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
+ #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
+ #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ count++ < 30 &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
+- printk("Caller[%08lx]: %pS\n", rw->ins[7],
++ printk("Caller[%08lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+ rw = (struct reg_window32 *)rw->ins[6];
+ }
+ }
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+- if(regs->psr & PSR_PS)
++ if(regs->psr & PSR_PS) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
+ do_exit(SIGSEGV);
+ }
+
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index 10f7bb9..cdb6793 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
+ i + 1,
+ p->trapstack[i].tstate, p->trapstack[i].tpc,
+ p->trapstack[i].tnpc, p->trapstack[i].tt);
+- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
++ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
+ }
+ }
+
+@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
+
+ lvl -= 0x100;
+ if (regs->tstate & TSTATE_PRIV) {
++
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ sprintf(buffer, "Kernel bad sw trap %lx", lvl);
+ die_if_kernel(buffer, regs);
+ }
+@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
+ void bad_trap_tl1(struct pt_regs *regs, long lvl)
+ {
+ char buffer[32];
+-
++
+ if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
+ 0, lvl, SIGTRAP) == NOTIFY_STOP)
+ return;
+
++#ifdef CONFIG_PAX_REFCOUNT
++ if (lvl == 6)
++ pax_report_refcount_overflow(regs);
++#endif
++
+ dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+
+ sprintf (buffer, "Bad trap %lx at tl>0", lvl);
+@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
+ regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
+ printk("%s" "ERROR(%d): ",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
+- printk("TPC<%pS>\n", (void *) regs->tpc);
++ printk("TPC<%pA>\n", (void *) regs->tpc);
+ printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
+ (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
+ (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
+@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
+ panic("Irrecoverable Cheetah+ parity error.");
+ }
+
+@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
+ smp_processor_id(),
+ (type & 0x1) ? 'I' : 'D',
+ regs->tpc);
+- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
+ }
+
+ struct sun4v_error_entry {
+@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
+
+ printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
+ regs->tpc, tl);
+- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
++ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
+ printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
+- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
++ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
+ (void *) regs->u_regs[UREG_I7]);
+ printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
+ "pte[%lx] error[%lx]\n",
+@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
+ fp = (unsigned long)sf->fp + STACK_BIAS;
+ }
+
+- printk(" [%016lx] %pS\n", pc, (void *) pc);
++ printk(" [%016lx] %pA\n", pc, (void *) pc);
+ } while (++count < 16);
+ }
+
+@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
+ return (struct reg_window *) (fp + STACK_BIAS);
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void die_if_kernel(char *str, struct pt_regs *regs)
+ {
+ static int die_counter;
+@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ while (rw &&
+ count++ < 30&&
+ is_kernel_stack(current, rw)) {
+- printk("Caller[%016lx]: %pS\n", rw->ins[7],
++ printk("Caller[%016lx]: %pA\n", rw->ins[7],
+ (void *) rw->ins[7]);
+
+ rw = kernel_stack_up(rw);
+@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
+ }
+ user_instruction_dump ((unsigned int __user *) regs->tpc);
+ }
+- if (regs->tstate & TSTATE_PRIV)
++ if (regs->tstate & TSTATE_PRIV) {
++ gr_handle_kernel_exploit();
+ do_exit(SIGKILL);
++ }
++
+ do_exit(SIGSEGV);
+ }
+ EXPORT_SYMBOL(die_if_kernel);
+diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
+index be183fe..1c8d332 100644
+--- a/arch/sparc/kernel/una_asm_64.S
++++ b/arch/sparc/kernel/una_asm_64.S
+@@ -127,7 +127,7 @@ do_int_load:
+ wr %o5, 0x0, %asi
+ retl
+ mov 0, %o0
+- .size __do_int_load, .-__do_int_load
++ .size do_int_load, .-do_int_load
+
+ .section __ex_table,"a"
+ .word 4b, __retl_efault
+diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
+index 3792099..2af17d8 100644
+--- a/arch/sparc/kernel/unaligned_64.c
++++ b/arch/sparc/kernel/unaligned_64.c
+@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
+ if (count < 5) {
+ last_time = jiffies;
+ count++;
+- printk("Kernel unaligned access at TPC[%lx] %pS\n",
++ printk("Kernel unaligned access at TPC[%lx] %pA\n",
+ regs->tpc, (void *) regs->tpc);
+ }
+ }
+diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
+index 365b646..624f037 100644
+--- a/arch/sparc/kernel/us3_cpufreq.c
++++ b/arch/sparc/kernel/us3_cpufreq.c
+@@ -197,6 +197,20 @@ static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
+ return 0;
+ }
+
++static int __init us3_freq_init(void);
++static void __exit us3_freq_exit(void);
++
++static struct cpufreq_driver _cpufreq_us3_driver = {
++ .init = us3_freq_cpu_init,
++ .verify = us3_freq_verify,
++ .target = us3_freq_target,
++ .get = us3_freq_get,
++ .exit = us3_freq_cpu_exit,
++ .owner = THIS_MODULE,
++ .name = "UltraSPARC-III",
++
++};
++
+ static int __init us3_freq_init(void)
+ {
+ unsigned long manuf, impl, ver;
+@@ -214,39 +228,22 @@ static int __init us3_freq_init(void)
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
+- struct cpufreq_driver *driver;
+-
+ ret = -ENOMEM;
+- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+- if (!driver)
+- goto err_out;
+-
+ us3_freq_table = kzalloc(
+ (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+ GFP_KERNEL);
+ if (!us3_freq_table)
+ goto err_out;
+
+- driver->init = us3_freq_cpu_init;
+- driver->verify = us3_freq_verify;
+- driver->target = us3_freq_target;
+- driver->get = us3_freq_get;
+- driver->exit = us3_freq_cpu_exit;
+- driver->owner = THIS_MODULE,
+- strcpy(driver->name, "UltraSPARC-III");
+-
+- cpufreq_us3_driver = driver;
+- ret = cpufreq_register_driver(driver);
++ cpufreq_us3_driver = &_cpufreq_us3_driver;
++ ret = cpufreq_register_driver(cpufreq_us3_driver);
+ if (ret)
+ goto err_out;
+
+ return 0;
+
+ err_out:
+- if (driver) {
+- kfree(driver);
+- cpufreq_us3_driver = NULL;
+- }
++ cpufreq_us3_driver = NULL;
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+ return ret;
+@@ -259,7 +256,6 @@ static void __exit us3_freq_exit(void)
+ {
+ if (cpufreq_us3_driver) {
+ cpufreq_unregister_driver(cpufreq_us3_driver);
+- kfree(cpufreq_us3_driver);
+ cpufreq_us3_driver = NULL;
+ kfree(us3_freq_table);
+ us3_freq_table = NULL;
+diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
+index e75faf0..24f12f9 100644
+--- a/arch/sparc/lib/Makefile
++++ b/arch/sparc/lib/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi -DST_DIV0=0x02
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+ lib-$(CONFIG_SPARC32) += memcpy.o memset.o
+diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
+index 0268210..f0291ca 100644
+--- a/arch/sparc/lib/atomic_64.S
++++ b/arch/sparc/lib/atomic_64.S
+@@ -18,7 +18,12 @@
+ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add, .-atomic_add
+
++ .globl atomic_add_unchecked
++ .type atomic_add_unchecked,#function
++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ add %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_unchecked, .-atomic_add_unchecked
++
+ .globl atomic_sub
+ .type atomic_sub,#function
+ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_sub, .-atomic_sub
+
++ .globl atomic_sub_unchecked
++ .type atomic_sub_unchecked,#function
++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ sub %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_sub_unchecked, .-atomic_sub_unchecked
++
+ .globl atomic_add_ret
+ .type atomic_add_ret,#function
+ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic_add_ret, .-atomic_add_ret
+
++ .globl atomic_add_ret_unchecked
++ .type atomic_add_ret_unchecked,#function
++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: lduw [%o1], %g1
++ addcc %g1, %o0, %g7
++ cas [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %icc, 2f
++ add %g7, %o0, %g7
++ sra %g7, 0, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
++
+ .globl atomic_sub_ret
+ .type atomic_sub_ret,#function
+ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: lduw [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 2f
+@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add, .-atomic64_add
+
++ .globl atomic64_add_unchecked
++ .type atomic64_add_unchecked,#function
++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_unchecked, .-atomic64_add_unchecked
++
+ .globl atomic64_sub
+ .type atomic64_sub,#function
+ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_sub, .-atomic64_sub
+
++ .globl atomic64_sub_unchecked
++ .type atomic64_sub_unchecked,#function
++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ subcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ nop
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
++
+ .globl atomic64_add_ret
+ .type atomic64_add_ret,#function
+ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- add %g1, %o0, %g7
++ addcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
+ 2: BACKOFF_SPIN(%o2, %o3, 1b)
+ .size atomic64_add_ret, .-atomic64_add_ret
+
++ .globl atomic64_add_ret_unchecked
++ .type atomic64_add_ret_unchecked,#function
++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
++ BACKOFF_SETUP(%o2)
++1: ldx [%o1], %g1
++ addcc %g1, %o0, %g7
++ casx [%o1], %g1, %g7
++ cmp %g1, %g7
++ bne,pn %xcc, 2f
++ add %g7, %o0, %g7
++ mov %g7, %o0
++ retl
++ nop
++2: BACKOFF_SPIN(%o2, %o3, 1b)
++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
++
+ .globl atomic64_sub_ret
+ .type atomic64_sub_ret,#function
+ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
+ BACKOFF_SETUP(%o2)
+ 1: ldx [%o1], %g1
+- sub %g1, %o0, %g7
++ subcc %g1, %o0, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %xcc, 6
++#endif
++
+ casx [%o1], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %xcc, 2f
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 704b126..2e79d76 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
+
+ /* Atomic counter implementation. */
+ EXPORT_SYMBOL(atomic_add);
++EXPORT_SYMBOL(atomic_add_unchecked);
+ EXPORT_SYMBOL(atomic_add_ret);
++EXPORT_SYMBOL(atomic_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic_sub);
++EXPORT_SYMBOL(atomic_sub_unchecked);
+ EXPORT_SYMBOL(atomic_sub_ret);
+ EXPORT_SYMBOL(atomic64_add);
++EXPORT_SYMBOL(atomic64_add_unchecked);
+ EXPORT_SYMBOL(atomic64_add_ret);
++EXPORT_SYMBOL(atomic64_add_ret_unchecked);
+ EXPORT_SYMBOL(atomic64_sub);
++EXPORT_SYMBOL(atomic64_sub_unchecked);
+ EXPORT_SYMBOL(atomic64_sub_ret);
+
+ /* Atomic bit operations. */
+diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
+index 91a7d29..ce75c29 100644
+--- a/arch/sparc/lib/rwsem_64.S
++++ b/arch/sparc/lib/rwsem_64.S
+@@ -11,7 +11,12 @@
+ .globl __down_read
+ __down_read:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -33,7 +38,12 @@ __down_read:
+ .globl __down_read_trylock
+ __down_read_trylock:
+ 1: lduw [%o0], %g1
+- add %g1, 1, %g7
++ addcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cmp %g7, 0
+ bl,pn %icc, 2f
+ mov 0, %o1
+@@ -51,7 +61,12 @@ __down_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -77,7 +92,12 @@ __down_write_trylock:
+ cmp %g3, 0
+ bne,pn %icc, 2f
+ mov 0, %o1
+- add %g3, %g1, %g7
++ addcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -90,7 +110,12 @@ __down_write_trylock:
+ __up_read:
+ 1:
+ lduw [%o0], %g1
+- sub %g1, 1, %g7
++ subcc %g1, 1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g1, %g7
+ cmp %g1, %g7
+ bne,pn %icc, 1b
+@@ -118,7 +143,12 @@ __up_write:
+ or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+@@ -143,7 +173,12 @@ __downgrade_write:
+ or %g1, %lo(RWSEM_WAITING_BIAS), %g1
+ 1:
+ lduw [%o0], %g3
+- sub %g3, %g1, %g7
++ subcc %g3, %g1, %g7
++
++#ifdef CONFIG_PAX_REFCOUNT
++ tvs %icc, 6
++#endif
++
+ cas [%o0], %g3, %g7
+ cmp %g3, %g7
+ bne,pn %icc, 1b
+diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
+index 79836a7..62f47a2 100644
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -2,7 +2,7 @@
+ #
+
+ asflags-y := -ansi
+-ccflags-y := -Werror
++#ccflags-y := -Werror
+
+ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
+ obj-y += fault_$(BITS).o
+diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
+index b99f81c..0a112f6 100644
+--- a/arch/sparc/mm/fault_32.c
++++ b/arch/sparc/mm/fault_32.c
+@@ -21,6 +21,9 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/page.h>
+@@ -167,6 +170,276 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->pc);
++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->pc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned int addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ else
++ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->pc);
++ err |= get_user(ba, (unsigned int *)(regs->pc+4));
++ err |= get_user(nop, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->pc-4));
++ err |= get_user(call, (unsigned int *)regs->pc);
++ err |= get_user(nop, (unsigned int *)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -231,6 +504,24 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
+index 43b0da9..f9f9985 100644
+--- a/arch/sparc/mm/fault_64.c
++++ b/arch/sparc/mm/fault_64.c
+@@ -20,6 +20,9 @@
+ #include <linux/kprobes.h>
+ #include <linux/kdebug.h>
+ #include <linux/percpu.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+ printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+ regs->tpc);
+ printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
++ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
+ printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+ dump_stack();
+ unhandled_fault(regs->tpc, current, regs);
+@@ -249,6 +252,465 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
+ show_regs(regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_DLRESOLVE
++static void pax_emuplt_close(struct vm_area_struct *vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ unsigned int *kaddr;
++
++ vmf->page = alloc_page(GFP_HIGHUSER);
++ if (!vmf->page)
++ return VM_FAULT_OOM;
++
++ kaddr = kmap(vmf->page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(vmf->page);
++ kunmap(vmf->page);
++ return VM_FAULT_MAJOR;
++}
++
++static const struct vm_operations_struct pax_vm_ops = {
++ .close = pax_emuplt_close,
++ .fault = pax_emuplt_fault
++};
++
++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ int ret;
++
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++ vma->vm_ops = &pax_vm_ops;
++
++ ret = insert_vm_struct(current->mm, vma);
++ if (ret)
++ return ret;
++
++ ++current->mm->total_vm;
++ return 0;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int *)regs->tpc);
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int *)regs->tpc);
++
++ if (err)
++ break;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
++ unsigned long addr;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, bajmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
++ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ else
++ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int sethi, mov1, call, mov2;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(call, (unsigned int *)(regs->tpc+8));
++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
++ err |= get_user(or, (unsigned int *)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020U &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ err = get_user(save, (unsigned int *)addr);
++ err |= get_user(call, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ if (err)
++ break;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma)
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ if (pax_insert_vma(vma, call_dl_resolve)) {
++ up_write(&current->mm->mmap_sem);
++ kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++#endif
++
++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
++ if ((save & 0xFFC00000U) == 0x05000000U &&
++ (call & 0xFFFFE000U) == 0x85C0A000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G2] = addr + 4;
++ addr = (save & 0x003FFFFFU) << 10;
++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++
++ /* PaX: 64-bit PLT stub */
++ err = get_user(sethi1, (unsigned int *)addr);
++ err |= get_user(sethi2, (unsigned int *)(addr+4));
++ err |= get_user(or1, (unsigned int *)(addr+8));
++ err |= get_user(or2, (unsigned int *)(addr+12));
++ err |= get_user(sllx, (unsigned int *)(addr+16));
++ err |= get_user(add, (unsigned int *)(addr+20));
++ err |= get_user(jmpl, (unsigned int *)(addr+24));
++ err |= get_user(nop, (unsigned int *)(addr+28));
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x88112000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x89293020U &&
++ add == 0x8A010005U &&
++ jmpl == 0x89C14000U &&
++ nop == 0x01000000U)
++ {
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G4] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
++ regs->u_regs[UREG_G4] = addr + 24;
++ addr = regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int *)(regs->tpc-4));
++ err |= get_user(call, (unsigned int *)regs->tpc);
++ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ dl_resolve &= 0xFFFFFFFFUL;
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int *)regs->tpc);
++ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (ba & 0xFFF00000U) == 0x30600000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ if (test_thread_flag(TIF_32BIT))
++ addr &= 0xFFFFFFFFUL;
++
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int *)pc+i))
++ printk(KERN_CONT "???????? ");
++ else
++ printk(KERN_CONT "%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -315,6 +777,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_group_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
+index f27d103..9f5fc4f 100644
+--- a/arch/sparc/mm/hugetlbpage.c
++++ b/arch/sparc/mm/hugetlbpage.c
+@@ -30,7 +30,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+- unsigned long flags)
++ unsigned long flags,
++ unsigned long offset)
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct * vma;
+@@ -69,7 +70,7 @@ full_search:
+ }
+ return -ENOMEM;
+ }
+- if (likely(!vma || addr + len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+@@ -87,7 +88,8 @@ static unsigned long
+ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+- const unsigned long flags)
++ const unsigned long flags,
++ const unsigned long offset)
+ {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+@@ -107,26 +109,28 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+
+ /* make sure it can fit in the remaining address space */
+ if (likely(addr > len)) {
+- vma = find_vma(mm, addr-len);
+- if (!vma || addr <= vma->vm_start) {
++ addr -= len;
++ vma = find_vma(mm, addr);
++ if (check_heap_stack_gap(vma, &addr, len, offset)) {
+ /* remember the address as a hint for next time */
+- return (mm->free_area_cache = addr-len);
++ return (mm->free_area_cache = addr);
+ }
+ }
+
+ if (unlikely(mm->mmap_base < len))
+ goto bottomup;
+
+- addr = (mm->mmap_base-len) & HPAGE_MASK;
++ addr = mm->mmap_base - len;
+
+ do {
++ addr &= HPAGE_MASK;
+ /*
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
+ */
+ vma = find_vma(mm, addr);
+- if (likely(!vma || addr+len <= vma->vm_start)) {
++ if (likely(check_heap_stack_gap(vma, &addr, len, offset))) {
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ }
+@@ -136,8 +140,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+- addr = (vma->vm_start-len) & HPAGE_MASK;
+- } while (likely(len < vma->vm_start));
++ addr = skip_heap_stack_gap(vma, len, offset);
++ } while (!IS_ERR_VALUE(addr));
+
+ bottomup:
+ /*
+@@ -165,6 +169,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long task_size = TASK_SIZE;
++ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
+
+ if (test_thread_flag(TIF_32BIT))
+ task_size = STACK_TOP32;
+@@ -183,16 +188,15 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ if (addr) {
+ addr = ALIGN(addr, HPAGE_SIZE);
+ vma = find_vma(mm, addr);
+- if (task_size - len >= addr &&
+- (!vma || addr + len <= vma->vm_start))
++ if (task_size - len >= addr && check_heap_stack_gap(vma, &addr, len, offset))
+ return addr;
+ }
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ else
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+- pgoff, flags);
++ pgoff, flags, offset);
+ }
+
+ pte_t *huge_pte_alloc(struct mm_struct *mm,
+diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
+index dc7c3b1..34c0070 100644
+--- a/arch/sparc/mm/init_32.c
++++ b/arch/sparc/mm/init_32.c
+@@ -317,6 +317,9 @@ extern void device_scan(void);
+ pgprot_t PAGE_SHARED __read_mostly;
+ EXPORT_SYMBOL(PAGE_SHARED);
+
++pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
++
+ void __init paging_init(void)
+ {
+ switch(sparc_cpu_model) {
+@@ -345,17 +348,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
+index 509b1ff..bfd7118 100644
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
+ PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index fc633db..b46e213 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
+ $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
+ $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
+
++ifdef CONSTIFY_PLUGIN
++USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
++
+ include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
+
+ #This will adjust *FLAGS accordingly to the platform.
+diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
+index 19e1bdd..3665b77 100644
+--- a/arch/um/include/asm/cache.h
++++ b/arch/um/include/asm/cache.h
+@@ -1,6 +1,7 @@
+ #ifndef __UM_CACHE_H
+ #define __UM_CACHE_H
+
++#include <linux/const.h>
+
+ #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
+ # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+@@ -12,6 +13,6 @@
+ # define L1_CACHE_SHIFT 5
+ #endif
+
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #endif
+diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
+index 6c03acd..a5e0215 100644
+--- a/arch/um/include/asm/kmap_types.h
++++ b/arch/um/include/asm/kmap_types.h
+@@ -23,6 +23,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_CLEARPAGE,
+ KM_TYPE_NR
+ };
+
+diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
+index 4cc9b6c..02e5029 100644
+--- a/arch/um/include/asm/page.h
++++ b/arch/um/include/asm/page.h
+@@ -14,6 +14,9 @@
+ #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++
+ #ifndef __ASSEMBLY__
+
+ struct page;
+diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
+index 084de4a..f44d25c 100644
+--- a/arch/um/include/asm/pgtable-3level.h
++++ b/arch/um/include/asm/pgtable-3level.h
+@@ -58,6 +58,7 @@
+ #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
+ #define pud_populate(mm, pud, pmd) \
+ set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
++#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
+
+ #ifdef CONFIG_64BIT
+ #define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
+diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
+index 4a28a15..654dc2a 100644
+--- a/arch/um/kernel/process.c
++++ b/arch/um/kernel/process.c
+@@ -393,22 +393,6 @@ int singlestepping(void * t)
+ return 2;
+ }
+
+-/*
+- * Only x86 and x86_64 have an arch_align_stack().
+- * All other arches have "#define arch_align_stack(x) (x)"
+- * in their asm/system.h
+- * As this is included in UML from asm-um/system-generic.h,
+- * we can use it to behave as the subarch does.
+- */
+-#ifndef arch_align_stack
+-unsigned long arch_align_stack(unsigned long sp)
+-{
+- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+- sp -= get_random_int() % 8192;
+- return sp & ~0xf;
+-}
+-#endif
+-
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long stack_page, sp, ip;
+diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
+index d1b93c4..ae1b7fd 100644
+--- a/arch/um/sys-i386/shared/sysdep/system.h
++++ b/arch/um/sys-i386/shared/sysdep/system.h
+@@ -17,7 +17,7 @@
+ # define AT_VECTOR_SIZE_ARCH 1
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ void default_idle(void);
+
+diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
+index 857ca0b..9a2669d 100644
+--- a/arch/um/sys-i386/syscalls.c
++++ b/arch/um/sys-i386/syscalls.c
+@@ -11,6 +11,21 @@
+ #include "asm/uaccess.h"
+ #include "asm/unistd.h"
+
++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
++{
++ unsigned long pax_task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
++ pax_task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > pax_task_size || addr > pax_task_size - len)
++ return -EINVAL;
++
++ return 0;
++}
++
+ /*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/i386 didn't use to be able to handle more than
+diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
+index d1b93c4..ae1b7fd 100644
+--- a/arch/um/sys-x86_64/shared/sysdep/system.h
++++ b/arch/um/sys-x86_64/shared/sysdep/system.h
+@@ -17,7 +17,7 @@
+ # define AT_VECTOR_SIZE_ARCH 1
+ #endif
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+
+ void default_idle(void);
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index ee0168d..096ed0e 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
+
+ config X86_32_LAZY_GS
+ def_bool y
+- depends on X86_32 && !CC_STACKPROTECTOR
++ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
+
+ config KTIME_SCALAR
+ def_bool X86_32
+@@ -985,6 +985,7 @@ config MICROCODE_OLD_INTERFACE
+
+ config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
++ depends on !GRKERNSEC_KMEM
+ ---help---
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+@@ -1008,7 +1009,7 @@ choice
+
+ config NOHIGHMEM
+ bool "off"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+@@ -1045,7 +1046,7 @@ config NOHIGHMEM
+
+ config HIGHMEM4G
+ bool "4GB"
+- depends on !X86_NUMAQ
++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
+ ---help---
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+@@ -1099,7 +1100,7 @@ config PAGE_OFFSET
+ hex
+ default 0xB0000000 if VMSPLIT_3G_OPT
+ default 0x80000000 if VMSPLIT_2G
+- default 0x78000000 if VMSPLIT_2G_OPT
++ default 0x70000000 if VMSPLIT_2G_OPT
+ default 0x40000000 if VMSPLIT_1G
+ default 0xC0000000
+ depends on X86_32
+@@ -1469,6 +1470,7 @@ config SECCOMP
+
+ config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
++ depends on X86_64 || !PAX_MEMORY_UDEREF
+ ---help---
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+@@ -1526,6 +1528,7 @@ config KEXEC_JUMP
+ config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+ default "0x1000000"
++ range 0x400000 0x40000000
+ ---help---
+ This gives the physical address where the kernel is loaded.
+
+@@ -1590,6 +1593,7 @@ config PHYSICAL_ALIGN
+ hex
+ prompt "Alignment value to which kernel should be aligned" if X86_32
+ default "0x1000000"
++ range 0x400000 0x1000000 if PAX_KERNEXEC
+ range 0x2000 0x1000000
+ ---help---
+ This value puts the alignment restrictions on physical address
+@@ -1621,9 +1625,10 @@ config HOTPLUG_CPU
+ Say N if you want to disable CPU hotplug.
+
+ config COMPAT_VDSO
+- def_bool y
++ def_bool n
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
+ ---help---
+ Map the 32-bit VDSO to the predictable old-style address too.
+ ---help---
+diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+index 0e566103..1a6b57e 100644
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
+
+ config X86_WP_WORKS_OK
+ def_bool y
+@@ -360,7 +360,7 @@ config X86_POPAD_OK
+
+ config X86_ALIGNMENT_16
+ def_bool y
+- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
+
+ config X86_INTEL_USERCOPY
+ def_bool y
+@@ -406,7 +406,7 @@ config X86_CMPXCHG64
+ # generates cmov.
+ config X86_CMOV
+ def_bool y
+- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
+
+ config X86_MINIMUM_CPU_FAMILY
+ int
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index d105f29..c928727 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -99,7 +99,7 @@ config X86_PTDUMP
+ config DEBUG_RODATA
+ bool "Write protect kernel read-only data structures"
+ default y
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && BROKEN
+ ---help---
+ Mark the kernel read-only data as write-protected in the pagetables,
+ in order to catch accidental (and incorrect) writes to such const
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index d2d24c9..0f21f8d 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
+ else
+ BITS := 64
+ UTS_MACHINE := x86_64
++ biarch := $(call cc-option,-m64)
+ CHECKFLAGS += -D__x86_64__ -m64
+
+ KBUILD_AFLAGS += -m64
+@@ -189,3 +190,12 @@ define archhelp
+ echo ' FDARGS="..." arguments for the booted kernel'
+ echo ' FDINITRD=file initrd for the booted kernel'
+ endef
++
++define OLD_LD
++
++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
++*** Please upgrade your binutils to 2.18 or newer
++endef
++
++archprepare:
++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index ec749c2..c2abaf02 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
+index 878e4b9..20537ab 100644
+--- a/arch/x86/boot/bitops.h
++++ b/arch/x86/boot/bitops.h
+@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+ u8 v;
+ const u32 *p = (const u32 *)addr;
+
+- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
+ return v;
+ }
+
+@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
+
+ static inline void set_bit(int nr, void *addr)
+ {
+- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
+ }
+
+ #endif /* BOOT_BITOPS_H */
+diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
+index 98239d2..f40214c 100644
+--- a/arch/x86/boot/boot.h
++++ b/arch/x86/boot/boot.h
+@@ -82,7 +82,7 @@ static inline void io_delay(void)
+ static inline u16 ds(void)
+ {
+ u16 seg;
+- asm("movw %%ds,%0" : "=rm" (seg));
++ asm volatile("movw %%ds,%0" : "=rm" (seg));
+ return seg;
+ }
+
+@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
+ static inline int memcmp(const void *s1, const void *s2, size_t len)
+ {
+ u8 diff;
+- asm("repe; cmpsb; setnz %0"
++ asm volatile("repe; cmpsb; setnz %0"
+ : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+ return diff;
+ }
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+index f8ed065..6764b5c 100644
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
+ KBUILD_CFLAGS += $(cflags-y)
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
+index f543b70..b60fba8 100644
+--- a/arch/x86/boot/compressed/head_32.S
++++ b/arch/x86/boot/compressed/head_32.S
+@@ -76,7 +76,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -149,7 +149,7 @@ relocated:
+ * and where it was actually loaded.
+ */
+ movl %ebp, %ebx
+- subl $LOAD_PHYSICAL_ADDR, %ebx
++ subl $____LOAD_PHYSICAL_ADDR, %ebx
+ jz 2f /* Nothing to be done if loaded at compiled addr. */
+ /*
+ * Process relocations.
+@@ -157,8 +157,7 @@ relocated:
+
+ 1: subl $4, %edi
+ movl (%edi), %ecx
+- testl %ecx, %ecx
+- jz 2f
++ jecxz 2f
+ addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
+ jmp 1b
+ 2:
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 077e1b6..2c6b13b5 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -91,7 +91,7 @@ ENTRY(startup_32)
+ notl %eax
+ andl %eax, %ebx
+ #else
+- movl $LOAD_PHYSICAL_ADDR, %ebx
++ movl $____LOAD_PHYSICAL_ADDR, %ebx
+ #endif
+
+ /* Target address to relocate to for decompression */
+@@ -183,7 +183,7 @@ no_longmode:
+ hlt
+ jmp 1b
+
+-#include "../../kernel/verify_cpu_64.S"
++#include "../../kernel/verify_cpu.S"
+
+ /*
+ * Be careful here startup_64 needs to be at a predictable
+@@ -234,7 +234,7 @@ ENTRY(startup_64)
+ notq %rax
+ andq %rax, %rbp
+ #else
+- movq $LOAD_PHYSICAL_ADDR, %rbp
++ movq $____LOAD_PHYSICAL_ADDR, %rbp
+ #endif
+
+ /* Target address to relocate to for decompression */
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index 842b2a3..f00178b 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -288,7 +288,7 @@ static void parse_elf(void *output)
+ case PT_LOAD:
+ #ifdef CONFIG_RELOCATABLE
+ dest = output;
+- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
+ #else
+ dest = (void *)(phdr->p_paddr);
+ #endif
+@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
+ error("Destination address too large");
+ #endif
+ #ifndef CONFIG_RELOCATABLE
+- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
+ error("Wrong destination address");
+ #endif
+
+diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
+index bcbd36c..b1754af 100644
+--- a/arch/x86/boot/compressed/mkpiggy.c
++++ b/arch/x86/boot/compressed/mkpiggy.c
+@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
+
+ offs = (olen > ilen) ? olen - ilen : 0;
+ offs += olen >> 12; /* Add 8 bytes for each 32K block */
+- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
++ offs += 64*1024; /* Add 64K bytes slack */
+ offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
+
+ printf(".section \".rodata.compressed\",\"a\",@progbits\n");
+diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
+index bbeb0c3..1eb0571 100644
+--- a/arch/x86/boot/compressed/relocs.c
++++ b/arch/x86/boot/compressed/relocs.c
+@@ -10,8 +10,11 @@
+ #define USE_BSD
+ #include <endian.h>
+
++#include "../../../../include/linux/autoconf.h"
++
+ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ static Elf32_Ehdr ehdr;
++static Elf32_Phdr *phdr;
+ static unsigned long reloc_count, reloc_idx;
+ static unsigned long *relocs;
+
+@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
+
+ static int is_safe_abs_reloc(const char* sym_name)
+ {
+- int i;
++ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
+ if (!strcmp(sym_name, safe_abs_relocs[i]))
+@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
+ }
+ }
+
++static void read_phdrs(FILE *fp)
++{
++ unsigned int i;
++
++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
++ if (!phdr) {
++ die("Unable to allocate %d program headers\n",
++ ehdr.e_phnum);
++ }
++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
++ die("Seek to %d failed: %s\n",
++ ehdr.e_phoff, strerror(errno));
++ }
++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
++ die("Cannot read ELF program headers: %s\n",
++ strerror(errno));
++ }
++ for(i = 0; i < ehdr.e_phnum; i++) {
++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
++ }
++
++}
++
+ static void read_shdrs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ Elf32_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
+
+ static void read_strtabs(FILE *fp)
+ {
+- int i;
++ unsigned int i;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_STRTAB) {
+@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
+
+ static void read_symtabs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
+
+ static void read_relocs(FILE *fp)
+ {
+- int i,j;
++ unsigned int i,j;
++ uint32_t base;
++
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
+ die("Cannot read symbol table: %s\n",
+ strerror(errno));
+ }
++ base = 0;
++ for (j = 0; j < ehdr.e_phnum; j++) {
++ if (phdr[j].p_type != PT_LOAD )
++ continue;
++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
++ continue;
++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
++ break;
++ }
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
+ Elf32_Rel *rel = &sec->reltab[j];
+- rel->r_offset = elf32_to_cpu(rel->r_offset);
++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
+ rel->r_info = elf32_to_cpu(rel->r_info);
+ }
+ }
+@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
+
+ static void print_absolute_symbols(void)
+ {
+- int i;
++ unsigned int i;
+ printf("Absolute symbols\n");
+ printf(" Num: Value Size Type Bind Visibility Name\n");
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+
+ if (sec->shdr.sh_type != SHT_SYMTAB) {
+ continue;
+@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
+
+ static void print_absolute_relocs(void)
+ {
+- int i, printed = 0;
++ unsigned int i, printed = 0;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ struct section *sec_applies, *sec_symtab;
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+- int j;
++ unsigned int j;
+ if (sec->shdr.sh_type != SHT_REL) {
+ continue;
+ }
+@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
+
+ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ {
+- int i;
++ unsigned int i;
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf32_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+- int j;
++ unsigned int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL) {
+@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
+ if (sym->st_shndx == SHN_ABS) {
+ continue;
+ }
++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
++ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
++ continue;
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
++ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
++ continue;
++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
++ continue;
++#endif
+ if (r_type == R_386_NONE || r_type == R_386_PC32) {
+ /*
+ * NONE can be ignored and and PC relative
+@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
+
+ static void emit_relocs(int as_text)
+ {
+- int i;
++ unsigned int i;
+ /* Count how many relocations I have and allocate space for them. */
+ reloc_count = 0;
+ walk_relocs(count_reloc);
+@@ -634,6 +693,7 @@ int main(int argc, char **argv)
+ fname, strerror(errno));
+ }
+ read_ehdr(fp);
++ read_phdrs(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
+index 4d3ff03..e4972ff 100644
+--- a/arch/x86/boot/cpucheck.c
++++ b/arch/x86/boot/cpucheck.c
+@@ -74,7 +74,7 @@ static int has_fpu(void)
+ u16 fcw = -1, fsw = -1;
+ u32 cr0;
+
+- asm("movl %%cr0,%0" : "=r" (cr0));
++ asm volatile("movl %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("movl %0,%%cr0" : : "r" (cr0));
+@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
+ {
+ u32 f0, f1;
+
+- asm("pushfl ; "
++ asm volatile("pushfl ; "
+ "pushfl ; "
+ "popl %0 ; "
+ "movl %0,%1 ; "
+@@ -115,7 +115,7 @@ static void get_flags(void)
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_intel_level),
+ "=b" (cpu_vendor[0]),
+ "=d" (cpu_vendor[1]),
+@@ -124,7 +124,7 @@ static void get_flags(void)
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (tfms),
+ "=c" (cpu.flags[4]),
+ "=d" (cpu.flags[0])
+@@ -136,7 +136,7 @@ static void get_flags(void)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "=a" (max_amd_level)
+ : "a" (0x80000000)
+ : "ebx", "ecx", "edx");
+@@ -144,7 +144,7 @@ static void get_flags(void)
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ u32 eax = 0x80000001;
+- asm("cpuid"
++ asm volatile("cpuid"
+ : "+a" (eax),
+ "=c" (cpu.flags[6]),
+ "=d" (cpu.flags[1])
+@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_K7_HWCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax &= ~(1 << 15);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ get_flags(); /* Make sure it really did something */
+ err = check_flags();
+@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 ecx = MSR_VIA_FCR;
+ u32 eax, edx;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ eax |= (1<<1)|(1<<7);
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ set_bit(X86_FEATURE_CX8, cpu.flags);
+ err = check_flags();
+@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
+ u32 eax, edx;
+ u32 level = 1;
+
+- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
+- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
+- asm("cpuid"
++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
++ asm volatile("cpuid"
+ : "+a" (level), "=d" (cpu.flags[0])
+ : : "ecx", "ebx");
+- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
+
+ err = check_flags();
+ }
+diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
+index b31cc54..afdbea1 100644
+--- a/arch/x86/boot/header.S
++++ b/arch/x86/boot/header.S
+@@ -224,10 +224,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
+ # single linked list of
+ # struct setup_data
+
+-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
+
+ #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
++#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
++#else
+ #define VO_INIT_SIZE (VO__end - VO__text)
++#endif
+ #if ZO_INIT_SIZE > VO_INIT_SIZE
+ #define INIT_SIZE ZO_INIT_SIZE
+ #else
+diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
+index cae3feb..ff8ff2a 100644
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -19,7 +19,7 @@
+
+ static int detect_memory_e820(void)
+ {
+- int count = 0;
++ unsigned int count = 0;
+ struct biosregs ireg, oreg;
+ struct e820entry *desc = boot_params.e820_map;
+ static struct e820entry buf; /* static so it is zeroed */
+diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
+index 11e8c6e..fdbb1ed 100644
+--- a/arch/x86/boot/video-vesa.c
++++ b/arch/x86/boot/video-vesa.c
+@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
+
+ boot_params.screen_info.vesapm_seg = oreg.es;
+ boot_params.screen_info.vesapm_off = oreg.di;
++ boot_params.screen_info.vesapm_size = oreg.cx;
+ }
+
+ /*
+diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
+index 819caa1..f32eba9 100644
+--- a/arch/x86/boot/video-vga.c
++++ b/arch/x86/boot/video-vga.c
+@@ -41,13 +41,12 @@ static __videocard video_vga;
+ static u8 vga_set_basic_mode(void)
+ {
+ struct biosregs ireg, oreg;
+- u16 ax;
+ u8 rows;
+ u8 mode;
+
+ initregs(&ireg);
+
+- ax = 0x0f00;
++ ireg.ax = 0x0f00;
+ intcall(0x10, &ireg, &oreg);
+ mode = oreg.al;
+
+diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
+index d42da38..787cdf3 100644
+--- a/arch/x86/boot/video.c
++++ b/arch/x86/boot/video.c
+@@ -90,7 +90,7 @@ static void store_mode_params(void)
+ static unsigned int get_entry(void)
+ {
+ char entry_buf[4];
+- int i, len = 0;
++ unsigned int i, len = 0;
+ int key;
+ unsigned int v;
+
+diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
+index 5b577d5..3c1fed4 100644
+--- a/arch/x86/crypto/aes-x86_64-asm_64.S
++++ b/arch/x86/crypto/aes-x86_64-asm_64.S
+@@ -8,6 +8,8 @@
+ * including this sentence is retained in full.
+ */
+
++#include <asm/alternative-asm.h>
++
+ .extern crypto_ft_tab
+ .extern crypto_it_tab
+ .extern crypto_fl_tab
+@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
+ je B192; \
+ leaq 32(r9),r9;
+
++#define ret pax_force_retaddr 0, 1; ret
++
+ #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
+ movq r1,r2; \
+ movq r3,r4; \
+diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
+index eb0566e..e3ebad8 100644
+--- a/arch/x86/crypto/aesni-intel_asm.S
++++ b/arch/x86/crypto/aesni-intel_asm.S
+@@ -16,6 +16,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ .text
+
+@@ -52,6 +53,7 @@ _key_expansion_256a:
+ pxor %xmm1, %xmm0
+ movaps %xmm0, (%rcx)
+ add $0x10, %rcx
++ pax_force_retaddr_bts
+ ret
+
+ _key_expansion_192a:
+@@ -75,6 +77,7 @@ _key_expansion_192a:
+ shufps $0b01001110, %xmm2, %xmm1
+ movaps %xmm1, 16(%rcx)
+ add $0x20, %rcx
++ pax_force_retaddr_bts
+ ret
+
+ _key_expansion_192b:
+@@ -93,6 +96,7 @@ _key_expansion_192b:
+
+ movaps %xmm0, (%rcx)
+ add $0x10, %rcx
++ pax_force_retaddr_bts
+ ret
+
+ _key_expansion_256b:
+@@ -104,6 +108,7 @@ _key_expansion_256b:
+ pxor %xmm1, %xmm2
+ movaps %xmm2, (%rcx)
+ add $0x10, %rcx
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
+ cmp %rcx, %rdi
+ jb .Ldec_key_loop
+ xor %rax, %rax
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_set_key)
+
+ /*
+ * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
+ movups (INP), STATE # input
+ call _aesni_enc1
+ movups STATE, (OUTP) # output
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_enc)
+
+ /*
+ * _aesni_enc1: internal ABI
+@@ -319,6 +328,7 @@ _aesni_enc1:
+ movaps 0x70(TKEYP), KEY
+ # aesenclast KEY, STATE # last round
+ .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -482,6 +492,7 @@ _aesni_enc4:
+ .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
+ # aesenclast KEY, STATE4
+ .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
+ movups (INP), STATE # input
+ call _aesni_dec1
+ movups STATE, (OUTP) #output
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_dec)
+
+ /*
+ * _aesni_dec1: internal ABI
+@@ -563,6 +576,7 @@ _aesni_dec1:
+ movaps 0x70(TKEYP), KEY
+ # aesdeclast KEY, STATE # last round
+ .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -726,6 +740,7 @@ _aesni_dec4:
+ .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
+ # aesdeclast KEY, STATE4
+ .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
++ pax_force_retaddr_bts
+ ret
+
+ /*
+@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
+ cmp $16, LEN
+ jge .Lecb_enc_loop1
+ .Lecb_enc_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_enc)
+
+ /*
+ * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
+ cmp $16, LEN
+ jge .Lecb_dec_loop1
+ .Lecb_dec_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_ecb_dec)
+
+ /*
+ * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
+ jge .Lcbc_enc_loop
+ movups STATE, (IVP)
+ .Lcbc_enc_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_enc)
+
+ /*
+ * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
+@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
+ .Lcbc_dec_ret:
+ movups IV, (IVP)
+ .Lcbc_dec_just_ret:
++ pax_force_retaddr 0, 1
+ ret
++ENDPROC(aesni_cbc_dec)
+diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+index 6214a9b..1f4fc9a 100644
+--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
++++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
+@@ -1,3 +1,5 @@
++#include <asm/alternative-asm.h>
++
+ # enter ECRYPT_encrypt_bytes
+ .text
+ .p2align 5
+@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr 0, 1
+ ret
+ # bytesatleast65:
+ ._bytesatleast65:
+@@ -891,6 +894,7 @@ ECRYPT_keysetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+ # enter ECRYPT_ivsetup
+ .text
+@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
+ add %r11,%rsp
+ mov %rdi,%rax
+ mov %rsi,%rdx
++ pax_force_retaddr
+ ret
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
+index 35974a5..5662ae2 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
+@@ -21,6 +21,7 @@
+ .text
+
+ #include <asm/asm-offsets.h>
++#include <asm/alternative-asm.h>
+
+ #define a_offset 0
+ #define b_offset 4
+@@ -269,6 +270,7 @@ twofish_enc_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+
+ twofish_dec_blk:
+@@ -321,4 +323,5 @@ twofish_dec_blk:
+
+ popq R1
+ movq $1,%rax
++ pax_force_retaddr 0, 1
+ ret
+diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
+index 14531ab..bc68a7b 100644
+--- a/arch/x86/ia32/ia32_aout.c
++++ b/arch/x86/ia32/ia32_aout.c
+@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+ unsigned long dump_start, dump_size;
+ struct user32 dump;
+
++ memset(&dump, 0, sizeof(dump));
++
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ has_dumped = 1;
+@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
+ dump_size = dump.u_ssize << PAGE_SHIFT;
+ DUMP_WRITE(dump_start, dump_size);
+ }
+- /*
+- * Finally dump the task struct. Not be used by gdb, but
+- * could be useful
+- */
+- set_fs(KERNEL_DS);
+- DUMP_WRITE(current, sizeof(*current));
+ end_coredump:
+ set_fs(fs);
+ return has_dumped;
+@@ -327,6 +323,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+ current->mm->cached_hole_size = 0;
+
++ retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
++ if (retval < 0) {
++ /* Someone check-me: is this error path enough? */
++ send_sig(SIGKILL, current, 0);
++ return retval;
++ }
++
+ install_exec_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
+
+@@ -422,13 +425,6 @@ beyond_if:
+
+ set_brk(current->mm->start_brk, current->mm->brk);
+
+- retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+- if (retval < 0) {
+- /* Someone check-me: is this error path enough? */
+- send_sig(SIGKILL, current, 0);
+- return retval;
+- }
+-
+ current->mm->start_stack =
+ (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
+ /* start thread */
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 588a7aa..a3468b0 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
+ }
+ seg = get_fs();
+ set_fs(KERNEL_DS);
+- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
++ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
+ set_fs(seg);
+ if (ret >= 0 && uoss_ptr) {
+ if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
+ */
+ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ size_t frame_size,
+- void **fpstate)
++ void __user **fpstate)
+ {
+ unsigned long sp;
+
+@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+
+ if (used_math()) {
+ sp = sp - sig_xstate_ia32_size;
+- *fpstate = (struct _fpstate_ia32 *) sp;
++ *fpstate = (struct _fpstate_ia32 __user *) sp;
+ if (save_i387_xstate_ia32(*fpstate) < 0)
+ return (void __user *) -1L;
+ }
+@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
+ sp -= frame_size;
+ /* Align the stack pointer according to the i386 ABI,
+ * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+- sp = ((sp + 4) & -16ul) - 4;
++ sp = ((sp - 12) & -16ul) - 4;
+ return (void __user *) sp;
+ }
+
+@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
+ * These are actually not used anymore, but left because some
+ * gdb versions depend on them as a marker.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ 0xb8,
+ __NR_ia32_rt_sigreturn,
+ 0x80cd,
+- 0,
++ 0
+ };
+
+ frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
+@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
++ else if (current->mm->context.vdso)
++ /* Return stub is in 32bit vsyscall page */
++ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+ else
+- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+- rt_sigreturn);
++ restorer = &frame->retcode;
+ put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+ /*
+ * Not actually used anymore, but left because some gdb
+ * versions need it.
+ */
+- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
++ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
+ } put_user_catch(err);
+
+ if (err)
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 4edd8eb..273579e 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -13,7 +13,9 @@
+ #include <asm/thread_info.h>
+ #include <asm/segment.h>
+ #include <asm/irqflags.h>
++#include <asm/pgtable.h>
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
+ ENDPROC(native_irq_enable_sysexit)
+ #endif
+
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ pushq %r11
++ call pax_randomize_kstack
++ popq %r11
++ popq %rax
++#endif
++ .endm
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
+ /*
+ * 32bit SYSENTER instruction entry.
+ *
+@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
+ CFI_REGISTER rsp,rbp
+ SWAPGS_UNSAFE_STACK
+ movq PER_CPU_VAR(kernel_stack), %rsp
+- addq $(KERNEL_STACK_OFFSET),%rsp
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs, here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %ebp,%ebp /* zero extension */
+ pushq $__USER32_DS
+ CFI_ADJUST_CFA_OFFSET 8
+@@ -135,28 +157,47 @@ ENTRY(ia32_sysenter_target)
+ pushfq
+ CFI_ADJUST_CFA_OFFSET 8
+ /*CFI_REL_OFFSET rflags,0*/
+- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
+- CFI_REGISTER rip,r10
++ orl $X86_EFLAGS_IF,(%rsp)
++ GET_THREAD_INFO(%r11)
++ movl TI_sysenter_return(%r11), %r11d
++ CFI_REGISTER rip,r11
+ pushq $__USER32_CS
+ CFI_ADJUST_CFA_OFFSET 8
+ /*CFI_REL_OFFSET cs,0*/
+ movl %eax, %eax
+- pushq %r10
++ pushq %r11
+ CFI_ADJUST_CFA_OFFSET 8
+ CFI_REL_OFFSET rip,0
+ pushq %rax
+ CFI_ADJUST_CFA_OFFSET 8
+ cld
+ SAVE_ARGS 0,0,1
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs, here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
+ /* no need to do an access_ok check here because rbp has been
+ 32bit zero extended */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%rbp
++#endif
++
+ 1: movl (%rbp),%ebp
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz sysenter_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+@@ -166,13 +207,15 @@ sysenter_do_call:
+ sysenter_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysexit_audit
+ sysexit_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ /* clear IF, that popfq doesn't enable interrupts early */
+ andl $~0x200,EFLAGS-R11(%rsp)
+ movl RIP-R11(%rsp),%edx /* User %eip */
+@@ -200,6 +243,9 @@ sysexit_from_sys_call:
+ movl %eax,%esi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -211,7 +257,7 @@ sysexit_from_sys_call:
+ .endm
+
+ .macro auditsys_exit exit
+- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jnz ia32_ret_from_sys_call
+ TRACE_IRQS_ON
+ sti
+@@ -221,12 +267,12 @@ sysexit_from_sys_call:
+ movzbl %al,%edi /* zero-extend that into %edi */
+ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
+ call audit_syscall_exit
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
+ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
+ cli
+ TRACE_IRQS_OFF
+- testl %edi,TI_flags(%r10)
++ testl %edi,TI_flags(%r11)
+ jz \exit
+ CLEAR_RREGS -ARGOFFSET
+ jmp int_with_check
+@@ -244,7 +290,7 @@ sysexit_audit:
+
+ sysenter_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz sysenter_auditsys
+ #endif
+ SAVE_REST
+@@ -256,6 +302,9 @@ sysenter_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp sysenter_do_call
+ CFI_ENDPROC
+ ENDPROC(ia32_sysenter_target)
+@@ -283,19 +332,25 @@ ENDPROC(ia32_sysenter_target)
+ ENTRY(ia32_cstar_target)
+ CFI_STARTPROC32 simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+ movl %esp,%r8d
+ CFI_REGISTER rsp,r8
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,1,1
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs on/off section: the syscall
+ * disabled irqs and here we enable it straight after entry:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,1,1
+ movl %eax,%eax /* zero extension */
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+@@ -311,13 +366,19 @@ ENTRY(ia32_cstar_target)
+ /* no need to do an access_ok check here because r8 has been
+ 32bit zero extended */
+ /* hardware stack frame is complete now */
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov $PAX_USER_SHADOW_BASE,%r11
++ add %r11,%r8
++#endif
++
+ 1: movl (%r8),%r9d
+ .section __ex_table,"a"
+ .quad 1b,ia32_badarg
+ .previous
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ CFI_REMEMBER_STATE
+ jnz cstar_tracesys
+ cmpq $IA32_NR_syscalls-1,%rax
+@@ -327,13 +388,15 @@ cstar_do_call:
+ cstar_dispatch:
+ call *ia32_sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+- GET_THREAD_INFO(%r10)
++ GET_THREAD_INFO(%r11)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
++ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
+ jnz sysretl_audit
+ sysretl_from_sys_call:
+- andl $~TS_COMPAT,TI_status(%r10)
++ pax_exit_kernel_user
++ pax_erase_kstack
++ andl $~TS_COMPAT,TI_status(%r11)
+ RESTORE_ARGS 1,-ARG_SKIP,1,1,1
+ movl RIP-ARGOFFSET(%rsp),%ecx
+ CFI_REGISTER rip,rcx
+@@ -361,7 +424,7 @@ sysretl_audit:
+
+ cstar_tracesys:
+ #ifdef CONFIG_AUDITSYSCALL
+- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
++ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
+ jz cstar_auditsys
+ #endif
+ xchgl %r9d,%ebp
+@@ -375,6 +438,9 @@ cstar_tracesys:
+ xchgl %ebp,%r9d
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp cstar_do_call
+ END(ia32_cstar_target)
+
+@@ -415,11 +481,6 @@ ENTRY(ia32_syscall)
+ CFI_REL_OFFSET rip,RIP-RIP
+ PARAVIRT_ADJUST_EXCEPTION_FRAME
+ SWAPGS
+- /*
+- * No need to follow this irqs on/off section: the syscall
+- * disabled irqs and here we enable it straight after entry:
+- */
+- ENABLE_INTERRUPTS(CLBR_NONE)
+ movl %eax,%eax
+ pushq %rax
+ CFI_ADJUST_CFA_OFFSET 8
+@@ -427,9 +488,20 @@ ENTRY(ia32_syscall)
+ /* note the registers are not zero extended to the sf.
+ this could be a problem. */
+ SAVE_ARGS 0,0,1
+- GET_THREAD_INFO(%r10)
+- orl $TS_COMPAT,TI_status(%r10)
+- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ GET_THREAD_INFO(%r11)
++ orl $TS_COMPAT,TI_status(%r11)
++ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
+ jnz ia32_tracesys
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja ia32_badsys
+@@ -452,6 +524,9 @@ ia32_tracesys:
+ RESTORE_REST
+ cmpq $(IA32_NR_syscalls-1),%rax
+ ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
++
++ pax_erase_kstack
++
+ jmp ia32_do_call
+ END(ia32_syscall)
+
+@@ -462,6 +537,7 @@ ia32_badsys:
+
+ quiet_ni_syscall:
+ movq $-ENOSYS,%rax
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+
+diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
+index 016218c..c5cd85d 100644
+--- a/arch/x86/ia32/sys_ia32.c
++++ b/arch/x86/ia32/sys_ia32.c
+@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
+ */
+ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
+ {
+- typeof(ubuf->st_uid) uid = 0;
+- typeof(ubuf->st_gid) gid = 0;
++ typeof(((struct stat64 *)0)->st_uid) uid = 0;
++ typeof(((struct stat64 *)0)->st_gid) gid = 0;
+ SET_UID(uid, stat->uid);
+ SET_GID(gid, stat->gid);
+ if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
+@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
+ }
+ set_fs(KERNEL_DS);
+ ret = sys_rt_sigprocmask(how,
+- set ? (sigset_t __user *)&s : NULL,
+- oset ? (sigset_t __user *)&s : NULL,
++ set ? (sigset_t __force_user *)&s : NULL,
++ oset ? (sigset_t __force_user *)&s : NULL,
+ sigsetsize);
+ set_fs(old_fs);
+ if (ret)
+@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
++ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
+ set_fs(old_fs);
+ if (put_compat_timespec(&t, interval))
+ return -EFAULT;
+@@ -381,13 +381,13 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
+ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
+ compat_size_t sigsetsize)
+ {
+- sigset_t s;
++ sigset_t s = { };
+ compat_sigset_t s32;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
++ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
+ set_fs(old_fs);
+ if (!ret) {
+ switch (_NSIG_WORDS) {
+@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
+ if (copy_siginfo_from_user32(&info, uinfo))
+ return -EFAULT;
+ set_fs(KERNEL_DS);
+- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
++ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
+ set_fs(old_fs);
+ return ret;
+ }
+@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
+ return -EFAULT;
+
+ set_fs(KERNEL_DS);
+- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
++ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
+ count);
+ set_fs(old_fs);
+
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index e2077d3..17d07ad 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -8,10 +8,10 @@
+
+ #ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+-1: lock
++672: lock
+ .section .smp_locks,"a"
+ .align 4
+- X86_ALIGN 1b
++ X86_ALIGN 672b
+ .previous
+ .endm
+ #else
+@@ -19,4 +19,43 @@
+ .endm
+ #endif
+
++#ifdef KERNEXEC_PLUGIN
++ .macro pax_force_retaddr_bts rip=0
++ btsq $63,\rip(%rsp)
++ .endm
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++ .macro pax_force_retaddr rip=0, reload=0
++ btsq $63,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ btsq $63,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
++ .macro pax_force_retaddr rip=0, reload=0
++ .if \reload
++ pax_set_fptr_mask
++ .endif
++ orq %r10,\rip(%rsp)
++ .endm
++ .macro pax_force_fptr ptr
++ orq %r10,\ptr
++ .endm
++ .macro pax_set_fptr_mask
++ movabs $0x8000000000000000,%r10
++ .endm
++#endif
++#else
++ .macro pax_force_retaddr rip=0, reload=0
++ .endm
++ .macro pax_force_fptr ptr
++ .endm
++ .macro pax_force_retaddr_bts rip=0
++ .endm
++ .macro pax_set_fptr_mask
++ .endm
++#endif
++
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index c240efc..fdfadf3 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+- ".section .altinstr_replacement, \"ax\"\n" \
++ ".section .altinstr_replacement, \"a\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous"
+
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 474d80d..1f97d58 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+
+-extern unsigned int apic_verbosity;
++extern int apic_verbosity;
+ extern int local_apic_timer_c2_ok;
+
+ extern int disable_apic;
+diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
+index 20370c6..a2eb9b0 100644
+--- a/arch/x86/include/asm/apm.h
++++ b/arch/x86/include/asm/apm.h
+@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
+index dc5a667..7a2470f 100644
+--- a/arch/x86/include/asm/atomic_32.h
++++ b/arch/x86/include/asm/atomic_32.h
+@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter)
++ : "ir" (i));
++}
++
++/**
++ * atomic_sub_unchecked - subtract integer from atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "+m" (v->counter)
+ : "ir" (i));
+ }
+@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "+m" (v->counter));
+ }
+
+@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "into\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "+m" (v->counter), "=qm" (c)
++ : : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (v->counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
+ }
+
+ /**
++ * atomic_add_return_unchecked - add integer and return
++ * @v: pointer of type atomic_unchecked_t
++ * @i: integer value to add
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ int __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
++ asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = atomic_read_unchecked(v);
++ atomic_set_unchecked(v, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
++/**
+ * atomic_sub_return - subtract integer and return
+ * @v: pointer of type atomic_t
+ * @i: integer value to subtract
+@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline int atomic_xchg(atomic_t *v, int new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * atomic_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+@@ -227,32 +439,73 @@ static inline int atomic_xchg(atomic_t *v, int new)
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" (mask), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
+@@ -266,9 +519,18 @@ typedef struct {
+ u64 __aligned(8) counter;
+ } atomic64_t;
+
++#ifdef CONFIG_PAX_REFCOUNT
++typedef struct {
++ u64 __aligned(8) counter;
++} atomic64_unchecked_t;
++#else
++typedef atomic64_t atomic64_unchecked_t;
++#endif
++
+ #define ATOMIC64_INIT(val) { (val) }
+
+ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
++extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
+
+ /**
+ * atomic64_xchg - xchg atomic64 variable
+@@ -279,6 +541,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
+ * the old value.
+ */
+ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
++extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
+
+ /**
+ * atomic64_set - set atomic64 variable
+@@ -290,6 +553,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
+ extern void atomic64_set(atomic64_t *ptr, u64 new_val);
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @ptr: pointer to type atomic64_unchecked_t
++ * @new_val: value to assign
++ *
++ * Atomically sets the value of @ptr to @new_val.
++ */
++extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
++
++/**
+ * atomic64_read - read atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+@@ -317,7 +589,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
+ return res;
+ }
+
+-extern u64 atomic64_read(atomic64_t *ptr);
++/**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @ptr: pointer to type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @ptr and returns it.
++ */
++static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
++{
++ u64 res;
++
++ /*
++ * Note, we inline this atomic64_unchecked_t primitive because
++ * it only clobbers EAX/EDX and leaves the others
++ * untouched. We also (somewhat subtly) rely on the
++ * fact that cmpxchg8b returns the current 64-bit value
++ * of the memory location we are touching:
++ */
++ asm volatile(
++ "mov %%ebx, %%eax\n\t"
++ "mov %%ecx, %%edx\n\t"
++ LOCK_PREFIX "cmpxchg8b %1\n"
++ : "=&A" (res)
++ : "m" (*ptr)
++ );
++
++ return res;
++}
+
+ /**
+ * atomic64_add_return - add and return
+@@ -332,8 +630,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
+ * Other variants with different arithmetic operators:
+ */
+ extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
++extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
+ extern u64 atomic64_inc_return(atomic64_t *ptr);
++extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
+ extern u64 atomic64_dec_return(atomic64_t *ptr);
++extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
+
+ /**
+ * atomic64_add - add integer to atomic64 variable
+@@ -345,6 +646,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
+ extern void atomic64_add(u64 delta, atomic64_t *ptr);
+
+ /**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @delta: integer value to add
++ * @ptr: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @delta to @ptr.
++ */
++extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
++
++/**
+ * atomic64_sub - subtract the atomic64 variable
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+@@ -354,6 +664,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
+ extern void atomic64_sub(u64 delta, atomic64_t *ptr);
+
+ /**
++ * atomic64_sub_unchecked - subtract the atomic64 variable
++ * @delta: integer value to subtract
++ * @ptr: pointer to type atomic64_unchecked_t
++ *
++ * Atomically subtracts @delta from @ptr.
++ */
++extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
++
++/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @delta: integer value to subtract
+ * @ptr: pointer to type atomic64_t
+@@ -373,6 +692,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
+ extern void atomic64_inc(atomic64_t *ptr);
+
+ /**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @ptr: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @ptr by 1.
++ */
++extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
++
++/**
+ * atomic64_dec - decrement atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+@@ -381,6 +708,14 @@ extern void atomic64_inc(atomic64_t *ptr);
+ extern void atomic64_dec(atomic64_t *ptr);
+
+ /**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @ptr: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @ptr by 1.
++ */
++extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
++
++/**
+ * atomic64_dec_and_test - decrement and test
+ * @ptr: pointer to type atomic64_t
+ *
+diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
+index d605dc2..72cb5cd 100644
+--- a/arch/x86/include/asm/atomic_64.h
++++ b/arch/x86/include/asm/atomic_64.h
+@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
+ }
+
+ /**
++ * atomic_read_unchecked - read atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ */
++static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
+ }
+
+ /**
++ * atomic_set_unchecked - set atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
+ */
+ static inline void atomic_add(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "addl %1,%0"
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_add_unchecked - add integer to atomic variable
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "addl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
+ */
+ static inline void atomic_sub(int i, atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subl %1,%0"
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "ir" (i), "m" (v->counter));
++}
++
++/**
++ * atomic_sub_unchecked - subtract the atomic variable
++ * @i: integer value to subtract
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically subtracts @i from @v.
++ */
++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "subl %1,%0\n"
+ : "=m" (v->counter)
+ : "ir" (i), "m" (v->counter));
+ }
+@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
+ */
+ static inline void atomic_inc(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "incl %0"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic_inc_unchecked - increment atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "incl %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
+ */
+ static inline void atomic_dec(atomic_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decl %0"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic_dec_unchecked - decrement atomic variable
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decl %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decl %0; sete %1"
++ asm volatile(LOCK_PREFIX "decl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incl %0; sete %1"
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
++ : "=m" (v->counter), "=qm" (c)
++ : "m" (v->counter) : "memory");
++ return c != 0;
++}
++
++/**
++ * atomic_inc_and_test_unchecked - increment and test
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically increments @v by 1
++ * and returns true if the result is zero, or false for all
++ * other cases.
++ */
++static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
++{
++ unsigned char c;
++
++ asm volatile(LOCK_PREFIX "incl %0\n"
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "ir" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
+ static inline int atomic_add_return(int i, atomic_t *v)
+ {
+ int __i = i;
+- asm volatile(LOCK_PREFIX "xaddl %0, %1"
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movl %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++}
++
++/**
++ * atomic_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer of type atomic_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
++{
++ int __i = i;
++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
+ }
+
+ #define atomic_inc_return(v) (atomic_add_return(1, v))
++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
++{
++ return atomic_add_return_unchecked(1, v);
++}
+ #define atomic_dec_return(v) (atomic_sub_return(1, v))
+
+ /* The 64-bit atomic type */
+@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
+ }
+
+ /**
++ * atomic64_read_unchecked - read atomic64 variable
++ * @v: pointer of type atomic64_unchecked_t
++ *
++ * Atomically reads the value of @v.
++ * Doesn't imply a read memory barrier.
++ */
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return v->counter;
++}
++
++/**
+ * atomic64_set - set atomic64 variable
+ * @v: pointer to type atomic64_t
+ * @i: required value
+@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ }
+
+ /**
++ * atomic64_set_unchecked - set atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ * @i: required value
++ *
++ * Atomically sets the value of @v to @i.
++ */
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
++
++/**
+ * atomic64_add - add integer to atomic64 variable
+ * @i: integer value to add
+ * @v: pointer to type atomic64_t
+@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
+ */
+ static inline void atomic64_add(long i, atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "addq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "er" (i), "m" (v->counter));
++}
++
++/**
++ * atomic64_add_unchecked - add integer to atomic64 variable
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v.
++ */
++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "addq %1,%0"
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
+ */
+ static inline void atomic64_sub(long i, atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "subq %1,%0"
++ asm volatile(LOCK_PREFIX "subq %1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "=m" (v->counter)
+ : "er" (i), "m" (v->counter));
+ }
+@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
++ asm volatile(LOCK_PREFIX "subq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "addq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+ */
+ static inline void atomic64_inc(atomic64_t *v)
+ {
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_inc_unchecked - increment atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically increments @v by 1.
++ */
++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
++{
+ asm volatile(LOCK_PREFIX "incq %0"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
+ */
+ static inline void atomic64_dec(atomic64_t *v)
+ {
+- asm volatile(LOCK_PREFIX "decq %0"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=m" (v->counter)
++ : "m" (v->counter));
++}
++
++/**
++ * atomic64_dec_unchecked - decrement atomic64 variable
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically decrements @v by 1.
++ */
++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "decq %0\n"
+ : "=m" (v->counter)
+ : "m" (v->counter));
+ }
+@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "decq %0; sete %1"
++ asm volatile(LOCK_PREFIX "decq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "incq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "incq %0; sete %1"
++ asm volatile(LOCK_PREFIX "incq %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decq %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "m" (v->counter) : "memory");
+ return c != 0;
+@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ {
+ unsigned char c;
+
+- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
++ asm volatile(LOCK_PREFIX "addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "=m" (v->counter), "=qm" (c)
+ : "er" (i), "m" (v->counter) : "memory");
+ return c;
+@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
+ static inline long atomic64_add_return(long i, atomic64_t *v)
+ {
+ long __i = i;
+- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "movq %0, %1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+r" (i), "+m" (v->counter)
++ : : "memory");
++ return i + __i;
++}
++
++/**
++ * atomic64_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @v: pointer to type atomic64_unchecked_t
++ *
++ * Atomically adds @i to @v and returns @i + @v
++ */
++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
++{
++ long __i = i;
++ asm volatile(LOCK_PREFIX "xaddq %0, %1"
+ : "+r" (i), "+m" (v->counter)
+ : : "memory");
+ return i + __i;
+@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
+ }
+
+ #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
++{
++ return atomic64_add_return_unchecked(1, v);
++}
+ #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
+
+ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic64_xchg(atomic64_t *v, long new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
+ static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+ {
+ return cmpxchg(&v->counter, old, new);
+ }
+
++static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
+ static inline long atomic_xchg(atomic_t *v, int new)
+ {
+ return xchg(&v->counter, new);
+ }
+
++static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
+ /**
+ * atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
+ */
+ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ {
+- int c, old;
++ int c, old, new;
+ c = atomic_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic_cmpxchg((v), c, c + (a));
++
++ asm volatile("addl %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subl %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "ir" (a));
++
++ old = atomic_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
+ */
+ static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+ {
+- long c, old;
++ long c, old, new;
+ c = atomic64_read(v);
+ for (;;) {
+- if (unlikely(c == (u)))
++ if (unlikely(c == u))
+ break;
+- old = atomic64_cmpxchg((v), c, c + (a));
++
++ asm volatile("addq %2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "subq %2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "=r" (new)
++ : "0" (c), "er" (a));
++
++ old = atomic64_cmpxchg(v, c, new);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+- return c != (u);
++ return c != u;
+ }
+
+ /**
+@@ -466,14 +864,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
+ #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+ /* These are x86-specific, used by some header files */
+-#define atomic_clear_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "andl %0,%1" \
+- : : "r" (~(mask)), "m" (*(addr)) : "memory")
++static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
+
+-#define atomic_set_mask(mask, addr) \
+- asm volatile(LOCK_PREFIX "orl %0,%1" \
+- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
+- : "memory")
++static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "andl %1,%0"
++ : "+m" (v->counter)
++ : "r" (~(mask))
++ : "memory");
++}
++
++static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
++
++static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
++{
++ asm volatile(LOCK_PREFIX "orl %1,%0"
++ : "+m" (v->counter)
++ : "r" (mask)
++ : "memory");
++}
+
+ /* Atomic operations are already serializing on x86 */
+ #define smp_mb__before_atomic_dec() barrier()
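
The atomic_64.h conversion mirrors the 32-bit one; the most interesting reshaping is atomic_add_unless()/atomic64_add_unless(), where the candidate value is now computed through an overflow-checked add before it is ever published by cmpxchg. A hedged sketch of that loop shape, using the GCC/Clang __builtin_add_overflow and __atomic builtins rather than the patch's inline asm (demo_add_unless is a made-up name, not a kernel API):

/*
 * Compute the candidate value with an overflow check first, then try to
 * publish it with a compare-and-swap; only already-validated values ever
 * become visible to other CPUs.
 */
static int demo_add_unless(int *v, int a, int u)
{
        int c = __atomic_load_n(v, __ATOMIC_RELAXED);

        for (;;) {
                int new;

                if (c == u)
                        break;
                if (__builtin_add_overflow(c, a, &new))
                        return -1;      /* stand-in for the int $4 trap */
                /* on failure the CAS reloads c into our local copy, so just retry */
                if (__atomic_compare_exchange_n(v, &c, new, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                        break;
        }
        return c != u;
}

The patch keeps the check inside the asm so the common path costs only a predicted-not-taken jno; the undo-plus-trap sequence runs only when an overflow actually happens.
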
+diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
+index 02b47a6..d5c4b15 100644
+--- a/arch/x86/include/asm/bitops.h
++++ b/arch/x86/include/asm/bitops.h
+@@ -38,7 +38,7 @@
+ * a mask operation on a byte.
+ */
+ #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
+-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
++#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
+ #define CONST_MASK(nr) (1 << ((nr) & 7))
+
+ /**
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 7a10659..8bbf355 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -11,10 +11,15 @@
+ #include <asm/pgtable_types.h>
+
+ /* Physical address where kernel should be loaded. */
+-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ + (CONFIG_PHYSICAL_ALIGN - 1)) \
+ & ~(CONFIG_PHYSICAL_ALIGN - 1))
+
++#ifndef __ASSEMBLY__
++extern unsigned char __LOAD_PHYSICAL_ADDR[];
++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
++#endif
++
+ /* Minimum kernel alignment, as a power of two */
+ #ifdef CONFIG_X86_64
+ #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
+diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
+index 549860d..7d45f68 100644
+--- a/arch/x86/include/asm/cache.h
++++ b/arch/x86/include/asm/cache.h
+@@ -5,9 +5,10 @@
+
+ /* L1 cache line size */
+ #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
+-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
++#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
+
+ #define __read_mostly __attribute__((__section__(".data.read_mostly")))
++#define __read_only __attribute__((__section__(".data.read_only")))
+
+ #ifdef CONFIG_X86_VSMP
+ /* vSMP Internode cacheline shift */
+diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
+index b54f6af..5b376a6 100644
+--- a/arch/x86/include/asm/cacheflush.h
++++ b/arch/x86/include/asm/cacheflush.h
+@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
+ static inline unsigned long get_page_memtype(struct page *pg)
+ {
+ if (!PageUncached(pg) && !PageWC(pg))
+- return -1;
++ return ~0UL;
+ else if (!PageUncached(pg) && PageWC(pg))
+ return _PAGE_CACHE_WC;
+ else if (PageUncached(pg) && !PageWC(pg))
+@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+ SetPageWC(pg);
+ break;
+ default:
+- case -1:
++ case ~0UL:
+ ClearPageUncached(pg);
+ ClearPageWC(pg);
+ break;
+diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
+index 0e63c9a..ab8d972 100644
+--- a/arch/x86/include/asm/calling.h
++++ b/arch/x86/include/asm/calling.h
+@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
+ * for assembly code:
+ */
+
+-#define R15 0
+-#define R14 8
+-#define R13 16
+-#define R12 24
+-#define RBP 32
+-#define RBX 40
++#define R15 (0)
++#define R14 (8)
++#define R13 (16)
++#define R12 (24)
++#define RBP (32)
++#define RBX (40)
+
+ /* arguments: interrupts/non tracing syscalls only save up to here: */
+-#define R11 48
+-#define R10 56
+-#define R9 64
+-#define R8 72
+-#define RAX 80
+-#define RCX 88
+-#define RDX 96
+-#define RSI 104
+-#define RDI 112
+-#define ORIG_RAX 120 /* + error_code */
++#define R11 (48)
++#define R10 (56)
++#define R9 (64)
++#define R8 (72)
++#define RAX (80)
++#define RCX (88)
++#define RDX (96)
++#define RSI (104)
++#define RDI (112)
++#define ORIG_RAX (120) /* + error_code */
+ /* end of arguments */
+
+ /* cpu exception frame or undefined in case of fast syscall: */
+-#define RIP 128
+-#define CS 136
+-#define EFLAGS 144
+-#define RSP 152
+-#define SS 160
++#define RIP (128)
++#define CS (136)
++#define EFLAGS (144)
++#define RSP (152)
++#define SS (160)
+
+ #define ARGOFFSET R11
+ #define SWFRAME ORIG_RAX
+diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
+index 46fc474..b02b0f9 100644
+--- a/arch/x86/include/asm/checksum_32.h
++++ b/arch/x86/include/asm/checksum_32.h
+@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+ int len, __wsum sum,
+ int *src_err_ptr, int *dst_err_ptr);
+
++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
++ int len, __wsum sum,
++ int *src_err_ptr, int *dst_err_ptr);
++
+ /*
+ * Note: when you get a NULL pointer exception here this means someone
+ * passed in an incorrect kernel address to one of these functions.
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
+ int *err_ptr)
+ {
+ might_sleep();
+- return csum_partial_copy_generic((__force void *)src, dst,
++ return csum_partial_copy_generic_from_user((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ }
+
+@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
+ {
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, dst, len))
+- return csum_partial_copy_generic(src, (__force void *)dst,
++ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+
+ if (len)
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index 617bd56..7b047a1 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -4,6 +4,7 @@
+ #include <asm/desc_defs.h>
+ #include <asm/ldt.h>
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
+ #include <linux/smp.h>
+
+ static inline void fill_ldt(struct desc_struct *desc,
+@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
+ desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
+ desc->type = (info->read_exec_only ^ 1) << 1;
+ desc->type |= info->contents << 2;
++ desc->type |= info->seg_not_present ^ 1;
+ desc->s = 1;
+ desc->dpl = 0x3;
+ desc->p = info->seg_not_present ^ 1;
+@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
+ }
+
+ extern struct desc_ptr idt_descr;
+-extern gate_desc idt_table[];
+-
+-struct gdt_page {
+- struct desc_struct gdt[GDT_ENTRIES];
+-} __attribute__((aligned(PAGE_SIZE)));
+-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
++extern gate_desc idt_table[256];
+
++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
+ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
+ {
+- return per_cpu(gdt_page, cpu).gdt;
++ return cpu_gdt_table[cpu];
+ }
+
+ #ifdef CONFIG_X86_64
+@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
+ unsigned long base, unsigned dpl, unsigned flags,
+ unsigned short seg)
+ {
+- gate->a = (seg << 16) | (base & 0xffff);
+- gate->b = (base & 0xffff0000) |
+- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
++ gate->gate.offset_low = base;
++ gate->gate.seg = seg;
++ gate->gate.reserved = 0;
++ gate->gate.type = type;
++ gate->gate.s = 0;
++ gate->gate.dpl = dpl;
++ gate->gate.p = 1;
++ gate->gate.offset_high = base >> 16;
+ }
+
+ #endif
+@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+ static inline void native_write_idt_entry(gate_desc *idt, int entry,
+ const gate_desc *gate)
+ {
++ pax_open_kernel();
+ memcpy(&idt[entry], gate, sizeof(*gate));
++ pax_close_kernel();
+ }
+
+ static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
+ const void *desc)
+ {
++ pax_open_kernel();
+ memcpy(&ldt[entry], desc, 8);
++ pax_close_kernel();
+ }
+
+ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
+ size = sizeof(struct desc_struct);
+ break;
+ }
++
++ pax_open_kernel();
+ memcpy(&gdt[entry], desc, size);
++ pax_close_kernel();
+ }
+
+ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
+@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
+
+ static inline void native_load_tr_desc(void)
+ {
++ pax_open_kernel();
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
++ pax_close_kernel();
+ }
+
+ static inline void native_load_gdt(const struct desc_ptr *dtr)
+@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
+ unsigned int i;
+ struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+
++ pax_open_kernel();
+ for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
+ gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
++ pax_close_kernel();
+ }
+
+ #define _LDT_empty(info) \
+@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+ desc->limit = (limit >> 16) & 0xf;
+ }
+
+-static inline void _set_gate(int gate, unsigned type, void *addr,
++static inline void _set_gate(int gate, unsigned type, const void *addr,
+ unsigned dpl, unsigned ist, unsigned seg)
+ {
+ gate_desc s;
+@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
+ * Pentium F0 0F bugfix can have resulted in the mapped
+ * IDT being write-protected.
+ */
+-static inline void set_intr_gate(unsigned int n, void *addr)
++static inline void set_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
+@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
+ /*
+ * This routine sets up an interrupt gate at directory privilege level 3.
+ */
+-static inline void set_system_intr_gate(unsigned int n, void *addr)
++static inline void set_system_intr_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_system_trap_gate(unsigned int n, void *addr)
++static inline void set_system_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
+ }
+
+-static inline void set_trap_gate(unsigned int n, void *addr)
++static inline void set_trap_gate(unsigned int n, const void *addr)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
+@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
+ static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
+ }
+
+-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
+ }
+
+-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
+ {
+ BUG_ON((unsigned)n > 0xFF);
+ _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
+ }
+
++#ifdef CONFIG_X86_32
++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
++{
++ struct desc_struct d;
++
++ if (likely(limit))
++ limit = (limit - 1UL) >> PAGE_SHIFT;
++ pack_descriptor(&d, base, limit, 0xFB, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
++}
++#endif
++
+ #endif /* _ASM_X86_DESC_H */
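
The descriptor-table writers above are bracketed with pax_open_kernel()/pax_close_kernel() because, with the rest of this patch applied, the GDT/IDT/LDT live in read-only memory. Those helpers are defined elsewhere in the patch; the sketch below only models the common native-x86 technique (briefly clearing CR0.WP around the write) and uses demo_* names to make clear it is an assumption, not the patch's implementation:

#include <linux/preempt.h>
#include <linux/string.h>
#include <asm/desc_defs.h>
#include <asm/processor-flags.h>

static inline void demo_open_kernel(void)
{
        unsigned long cr0;

        preempt_disable();
        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 & ~X86_CR0_WP) : "memory");
}

static inline void demo_close_kernel(void)
{
        unsigned long cr0;

        asm volatile("mov %%cr0, %0" : "=r" (cr0));
        asm volatile("mov %0, %%cr0" : : "r" (cr0 | X86_CR0_WP) : "memory");
        preempt_enable();
}

/* usage, mirroring native_write_idt_entry() in the hunk above */
static inline void demo_write_idt_entry(gate_desc *idt, int entry,
                                        const gate_desc *gate)
{
        demo_open_kernel();
        memcpy(&idt[entry], gate, sizeof(*gate));
        demo_close_kernel();
}
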
+diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
+index 9d66848..6b4a691 100644
+--- a/arch/x86/include/asm/desc_defs.h
++++ b/arch/x86/include/asm/desc_defs.h
+@@ -31,6 +31,12 @@ struct desc_struct {
+ unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+ unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
+ };
++ struct {
++ u16 offset_low;
++ u16 seg;
++ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
++ unsigned offset_high: 16;
++ } gate;
+ };
+ } __attribute__((packed));
+
+diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
+index cee34e9..a7c3fa2 100644
+--- a/arch/x86/include/asm/device.h
++++ b/arch/x86/include/asm/device.h
+@@ -6,7 +6,7 @@ struct dev_archdata {
+ void *acpi_handle;
+ #endif
+ #ifdef CONFIG_X86_64
+-struct dma_map_ops *dma_ops;
++ const struct dma_map_ops *dma_ops;
+ #endif
+ #ifdef CONFIG_DMAR
+ void *iommu; /* hook for IOMMU specific extension */
+diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
+index 6a25d5d..786b202 100644
+--- a/arch/x86/include/asm/dma-mapping.h
++++ b/arch/x86/include/asm/dma-mapping.h
+@@ -25,9 +25,9 @@ extern int iommu_merge;
+ extern struct device x86_dma_fallback_dev;
+ extern int panic_on_overflow;
+
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ #ifdef CONFIG_X86_32
+ return dma_ops;
+@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ /* Make sure we keep the same behaviour */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ if (ops->mapping_error)
+ return ops->mapping_error(dev, dma_addr);
+
+@@ -122,7 +122,7 @@ static inline void *
+ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+ {
+- struct dma_map_ops *ops = get_dma_ops(dev);
++ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
+index 40b4e61..40d8133 100644
+--- a/arch/x86/include/asm/e820.h
++++ b/arch/x86/include/asm/e820.h
+@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
+ #define ISA_END_ADDRESS 0x100000
+ #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
+
+-#define BIOS_BEGIN 0x000a0000
++#define BIOS_BEGIN 0x000c0000
+ #define BIOS_END 0x00100000
+
+ #ifdef __KERNEL__
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 8ac9d9a..0a6c96e 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -257,7 +257,25 @@ extern int force_personality32;
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#ifdef CONFIG_X86_32
++#define PAX_ELF_ET_DYN_BASE 0x10000000UL
++
++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
++#else
++#define PAX_ELF_ET_DYN_BASE 0x400000UL
++
++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
++#endif
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+@@ -310,9 +328,7 @@ do { \
+
+ #define ARCH_DLINFO \
+ do { \
+- if (vdso_enabled) \
+- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+- (unsigned long)current->mm->context.vdso); \
++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
+ } while (0)
+
+ #define AT_SYSINFO 32
+@@ -323,7 +339,7 @@ do { \
+
+ #endif /* !CONFIG_X86_32 */
+
+-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
++#define VDSO_CURRENT_BASE (current->mm->context.vdso)
+
+ #define VDSO_ENTRY \
+ ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
+@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
+ #define compat_arch_setup_additional_pages syscall32_setup_pages
+
+-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+-#define arch_randomize_brk arch_randomize_brk
+-
+ #endif /* _ASM_X86_ELF_H */
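
The elf.h hunk only introduces the PAX_ELF_ET_DYN_BASE and PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN constants; the code that consumes them lives elsewhere in the patch. For a rough sense of scale, assuming (as those later hunks do) that the delta is applied as that many random bits shifted up by PAGE_SHIFT:

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4KB pages */

int main(void)
{
        /* assumed model: an N-bit random value, page aligned, offsets the mmap base */
        for (int bits = 15; bits <= 16; bits++) {
                unsigned long long span = 1ULL << (bits + PAGE_SHIFT);
                printf("%d bits of entropy -> %llu MB of mmap base spread\n",
                       bits, span >> 20);
        }
        return 0;
}

So the 32-bit SEGMEXEC case (15 bits) spreads the base over 128MB and the non-SEGMEXEC case (16 bits) over 256MB, while the 64-bit case uses TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 bits and is correspondingly much wider.
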
+diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
+index cc70c1c..d96d011 100644
+--- a/arch/x86/include/asm/emergency-restart.h
++++ b/arch/x86/include/asm/emergency-restart.h
+@@ -15,6 +15,6 @@ enum reboot_type {
+
+ extern enum reboot_type reboot_type;
+
+-extern void machine_emergency_restart(void);
++extern void machine_emergency_restart(void) __noreturn;
+
+ #endif /* _ASM_X86_EMERGENCY_RESTART_H */
+diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
+index 1f11ce4..3fed751 100644
+--- a/arch/x86/include/asm/futex.h
++++ b/arch/x86/include/asm/futex.h
+@@ -12,20 +12,22 @@
+ #include <asm/system.h>
+
+ #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\t" insn "\n" \
+ "2:\t.section .fixup,\"ax\"\n" \
+ "3:\tmov\t%3, %1\n" \
+ "\tjmp\t2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
++ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
+ : "i" (-EFAULT), "0" (oparg), "1" (0))
+
+ #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
++ typecheck(u32 __user *, uaddr); \
+ asm volatile("1:\tmovl %2, %0\n" \
+ "\tmovl\t%0, %3\n" \
+ "\t" insn "\n" \
+- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
++ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
+ "\tjnz\t1b\n" \
+ "3:\t.section .fixup,\"ax\"\n" \
+ "4:\tmov\t%5, %1\n" \
+@@ -34,10 +36,10 @@
+ _ASM_EXTABLE(1b, 4b) \
+ _ASM_EXTABLE(2b, 4b) \
+ : "=&a" (oldval), "=&r" (ret), \
+- "+m" (*uaddr), "=&r" (tem) \
++ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
+ : "r" (oparg), "i" (-EFAULT), "1" (0))
+
+-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
++static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+ {
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+
+ switch (op) {
+ case FUTEX_OP_SET:
+- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
++ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
++ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
+ uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+ return ret;
+ }
+
+-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
++static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
+ int newval)
+ {
+
+@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
+ return -ENOSYS;
+ #endif
+
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
++ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
+ "2:\t.section .fixup, \"ax\"\n"
+ "3:\tmov %2, %0\n"
+ "\tjmp 2b\n"
+ "\t.previous\n"
+ _ASM_EXTABLE(1b, 3b)
+- : "=a" (oldval), "+m" (*uaddr)
++ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
+ : "i" (-EFAULT), "r" (newval), "0" (oldval)
+ : "memory"
+ );
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index ba180d9..3bad351 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
+ extern void enable_IO_APIC(void);
+
+ /* Statistics */
+-extern atomic_t irq_err_count;
+-extern atomic_t irq_mis_count;
++extern atomic_unchecked_t irq_err_count;
++extern atomic_unchecked_t irq_mis_count;
+
+ /* EISA */
+ extern void eisa_set_level_irq(unsigned int irq);
+diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
+index 0b20bbb..f06479b 100644
+--- a/arch/x86/include/asm/i387.h
++++ b/arch/x86/include/asm/i387.h
+@@ -56,10 +56,12 @@ static inline void tolerant_fwait(void)
+ _ASM_EXTABLE(1b, 2b));
+ }
+
+-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
++static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
+ {
+ int err;
+
++ fx = (struct i387_fxsave_struct __user *)____m(fx);
++
+ asm volatile("1: rex64/fxrstor (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+@@ -105,6 +107,8 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+ {
+ int err;
+
++ fx = (struct i387_fxsave_struct __user *)____m(fx);
++
+ asm volatile("1: rex64/fxsave (%[fx])\n\t"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+@@ -179,15 +183,15 @@ static inline void tolerant_fwait(void)
+ }
+
+ /* perform fxrstor iff the processor has extended states, otherwise frstor */
+-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
++static inline int fxrstor_checking(struct i387_fxsave_struct __user *fx)
+ {
+ /*
+ * The "nop" is needed to make the instructions the same
+ * length.
+ */
+ alternative_input(
+- "nop ; frstor %1",
+- "fxrstor %1",
++ __copyuser_seg" frstor %1; nop",
++ __copyuser_seg" fxrstor %1",
+ X86_FEATURE_FXSR,
+ "m" (*fx));
+
+@@ -195,13 +199,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+ }
+
+ /* We need a safe address that is cheap to find and that is already
+- in L1 during context switch. The best choices are unfortunately
+- different for UP and SMP */
+-#ifdef CONFIG_SMP
+-#define safe_address (__per_cpu_offset[0])
+-#else
+-#define safe_address (kstat_cpu(0).cpustat.user)
+-#endif
++ in L1 during context switch. */
++#define safe_address (init_tss[raw_smp_processor_id()].x86_tss.sp0)
+
+ /*
+ * These must be called with preempt disabled
+@@ -258,8 +257,16 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
+ {
+ if (task_thread_info(tsk)->status & TS_XSAVE)
+ return xrstor_checking(&tsk->thread.xstate->xsave);
+- else
+- return fxrstor_checking(&tsk->thread.xstate->fxsave);
++ else {
++ int ret;
++ mm_segment_t fs;
++
++ fs = get_fs();
++ set_fs(KERNEL_DS);
++ ret = fxrstor_checking(&tsk->thread.xstate->fxsave);
++ set_fs(fs);
++ return ret;
++ }
+ }
+
+ /*
+@@ -291,7 +298,7 @@ static inline void kernel_fpu_begin(void)
+ struct thread_info *me = current_thread_info();
+ preempt_disable();
+ if (me->status & TS_USEDFPU)
+- __save_init_fpu(me->task);
++ __save_init_fpu(current);
+ else
+ clts();
+ }
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index 6a63b86..b6a731c 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -170,7 +170,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+ return ioremap_nocache(offset, size);
+ }
+
+-extern void iounmap(volatile void __iomem *addr);
++extern void iounmap(const volatile void __iomem *addr);
+
+ extern void set_iounmap_nonlazy(void);
+
+diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
+index a299900..15c5410 100644
+--- a/arch/x86/include/asm/io_32.h
++++ b/arch/x86/include/asm/io_32.h
+@@ -3,6 +3,7 @@
+
+ #include <linux/string.h>
+ #include <linux/compiler.h>
++#include <asm/processor.h>
+
+ /*
+ * This file contains the definitions for the x86 IO instructions
+@@ -42,6 +43,17 @@
+
+ #ifdef __KERNEL__
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ #include <asm-generic/iomap.h>
+
+ #include <linux/vmalloc.h>
+diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
+index 2440678..c158b88 100644
+--- a/arch/x86/include/asm/io_64.h
++++ b/arch/x86/include/asm/io_64.h
+@@ -140,6 +140,17 @@ __OUTS(l)
+
+ #include <linux/vmalloc.h>
+
++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
++static inline int valid_phys_addr_range(unsigned long addr, size_t count)
++{
++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
++{
++ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
++}
++
+ #include <asm-generic/iomap.h>
+
+ void __memcpy_fromio(void *, unsigned long, unsigned);
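
Both io_32.h and io_64.h gain the same ARCH_HAS_VALID_PHYS_ADDR_RANGE checks: a /dev/mem-style access is accepted only if, rounded up to a page boundary, it still ends below 2^x86_phys_bits. A standalone illustration of that arithmetic (PHYS_BITS = 36 is just an assumed example value for boot_cpu_data.x86_phys_bits):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PHYS_BITS       36      /* example: CPU with a 64GB physical address space */

static int demo_valid_phys_addr_range(unsigned long long addr, size_t count)
{
        /* end of the access, rounded up to pages, must stay below the 2^PHYS_BITS limit */
        return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
               (1ULL << (PHYS_BITS - PAGE_SHIFT));
}

int main(void)
{
        printf("%d\n", demo_valid_phys_addr_range(0xFFFFF000ULL, 4096));   /* 1: well below 64GB */
        printf("%d\n", demo_valid_phys_addr_range(0xFFFFFFFFFULL, 4096));  /* 0: reaches past the limit */
        return 0;
}
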
+diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
+index fd6d21b..8b13915 100644
+--- a/arch/x86/include/asm/iommu.h
++++ b/arch/x86/include/asm/iommu.h
+@@ -3,7 +3,7 @@
+
+ extern void pci_iommu_shutdown(void);
+ extern void no_iommu_init(void);
+-extern struct dma_map_ops nommu_dma_ops;
++extern const struct dma_map_ops nommu_dma_ops;
+ extern int force_iommu, no_iommu;
+ extern int iommu_detected;
+ extern int iommu_pass_through;
+diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
+index 9e2b952..557206e 100644
+--- a/arch/x86/include/asm/irqflags.h
++++ b/arch/x86/include/asm/irqflags.h
+@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
+ sti; \
+ sysexit
+
++#define GET_CR0_INTO_RDI mov %cr0, %rdi
++#define SET_RDI_INTO_CR0 mov %rdi, %cr0
++#define GET_CR3_INTO_RDI mov %cr3, %rdi
++#define SET_RDI_INTO_CR3 mov %rdi, %cr3
++
+ #else
+ #define INTERRUPT_RETURN iret
+ #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
+diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
+index 4fe681d..bb6d40c 100644
+--- a/arch/x86/include/asm/kprobes.h
++++ b/arch/x86/include/asm/kprobes.h
+@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
+ #define BREAKPOINT_INSTRUCTION 0xcc
+ #define RELATIVEJUMP_INSTRUCTION 0xe9
+ #define MAX_INSN_SIZE 16
+-#define MAX_STACK_SIZE 64
+-#define MIN_STACK_SIZE(ADDR) \
+- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR))) \
+- ? (MAX_STACK_SIZE) \
+- : (((unsigned long)current_thread_info()) + \
+- THREAD_SIZE - (unsigned long)(ADDR)))
++#define MAX_STACK_SIZE 64UL
++#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
+
+ #define flush_insn_slot(p) do { } while (0)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 08bc2ff..2e88d1f 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -534,9 +534,9 @@ struct kvm_x86_ops {
+ bool (*gb_page_enable)(void);
+
+ const struct trace_print_flags *exit_reasons_str;
+-};
++} __do_const;
+
+-extern struct kvm_x86_ops *kvm_x86_ops;
++extern const struct kvm_x86_ops *kvm_x86_ops;
+
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
+index 47b9b6f..17233f6 100644
+--- a/arch/x86/include/asm/local.h
++++ b/arch/x86/include/asm/local.h
+@@ -11,33 +11,97 @@ typedef struct {
+ atomic_long_t a;
+ } local_t;
+
++typedef struct {
++ atomic_long_unchecked_t a;
++} local_unchecked_t;
++
+ #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+ #define local_read(l) atomic_long_read(&(l)->a)
++#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
+ #define local_set(l, i) atomic_long_set(&(l)->a, (i))
++#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
+
+ static inline void local_inc(local_t *l)
+ {
+- asm volatile(_ASM_INC "%0"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter));
++}
++
++static inline void local_inc_unchecked(local_unchecked_t *l)
++{
++ asm volatile(_ASM_INC "%0\n"
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_dec(local_t *l)
+ {
+- asm volatile(_ASM_DEC "%0"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter));
++}
++
++static inline void local_dec_unchecked(local_unchecked_t *l)
++{
++ asm volatile(_ASM_DEC "%0\n"
+ : "+m" (l->a.counter));
+ }
+
+ static inline void local_add(long i, local_t *l)
+ {
+- asm volatile(_ASM_ADD "%1,%0"
++ asm volatile(_ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter)
++ : "ir" (i));
++}
++
++static inline void local_add_unchecked(long i, local_unchecked_t *l)
++{
++ asm volatile(_ASM_ADD "%1,%0\n"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+
+ static inline void local_sub(long i, local_t *l)
+ {
+- asm volatile(_ASM_SUB "%1,%0"
++ asm volatile(_ASM_SUB "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ : "+m" (l->a.counter)
++ : "ir" (i));
++}
++
++static inline void local_sub_unchecked(long i, local_unchecked_t *l)
++{
++ asm volatile(_ASM_SUB "%1,%0\n"
+ : "+m" (l->a.counter)
+ : "ir" (i));
+ }
+@@ -55,7 +119,16 @@ static inline int local_sub_and_test(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_SUB "%2,%0; sete %1"
++ asm volatile(_ASM_SUB "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_ADD "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -73,7 +146,16 @@ static inline int local_dec_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_DEC "%0; sete %1"
++ asm volatile(_ASM_DEC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_INC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -91,7 +173,16 @@ static inline int local_inc_and_test(local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_INC "%0; sete %1"
++ asm volatile(_ASM_INC "%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_DEC "%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sete %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : : "memory");
+ return c != 0;
+@@ -110,7 +201,16 @@ static inline int local_add_negative(long i, local_t *l)
+ {
+ unsigned char c;
+
+- asm volatile(_ASM_ADD "%2,%0; sets %1"
++ asm volatile(_ASM_ADD "%2,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_SUB "%2,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ "sets %1\n"
+ : "+m" (l->a.counter), "=qm" (c)
+ : "ir" (i) : "memory");
+ return c;
+@@ -133,7 +233,15 @@ static inline long local_add_return(long i, local_t *l)
+ #endif
+ /* Modern 486+ processor */
+ __i = i;
+- asm volatile(_ASM_XADD "%0, %1;"
++ asm volatile(_ASM_XADD "%0, %1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ _ASM_MOV "%0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (i), "+m" (l->a.counter)
+ : : "memory");
+ return i + __i;
+@@ -148,6 +256,38 @@ no_xadd: /* Legacy 386 processor */
+ #endif
+ }
+
++/**
++ * local_add_return_unchecked - add and return
++ * @i: integer value to add
++ * @l: pointer to type local_unchecked_t
++ *
++ * Atomically adds @i to @l and returns @i + @l
++ */
++static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
++{
++ long __i;
++#ifdef CONFIG_M386
++ unsigned long flags;
++ if (unlikely(boot_cpu_data.x86 <= 3))
++ goto no_xadd;
++#endif
++ /* Modern 486+ processor */
++ __i = i;
++ asm volatile(_ASM_XADD "%0, %1\n"
++ : "+r" (i), "+m" (l->a.counter)
++ : : "memory");
++ return i + __i;
++
++#ifdef CONFIG_M386
++no_xadd: /* Legacy 386 processor */
++ local_irq_save(flags);
++ __i = local_read_unchecked(l);
++ local_set_unchecked(l, i + __i);
++ local_irq_restore(flags);
++ return i + __i;
++#endif
++}
++
+ static inline long local_sub_return(long i, local_t *l)
+ {
+ return local_add_return(-i, l);
+@@ -158,6 +298,8 @@ static inline long local_sub_return(long i, local_t *l)
+
+ #define local_cmpxchg(l, o, n) \
+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
++#define local_cmpxchg_unchecked(l, o, n) \
++ (cmpxchg_local(&((l)->a.counter), (o), (n)))
+ /* Always has a lock prefix */
+ #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
+
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index ef51b50..514ba37 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -12,13 +12,13 @@ struct device;
+ enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
+
+ struct microcode_ops {
+- enum ucode_state (*request_microcode_user) (int cpu,
++ enum ucode_state (* const request_microcode_user) (int cpu,
+ const void __user *buf, size_t size);
+
+- enum ucode_state (*request_microcode_fw) (int cpu,
++ enum ucode_state (* const request_microcode_fw) (int cpu,
+ struct device *device);
+
+- void (*microcode_fini_cpu) (int cpu);
++ void (* const microcode_fini_cpu) (int cpu);
+
+ /*
+ * The generic 'microcode_core' part guarantees that
+@@ -38,18 +38,18 @@ struct ucode_cpu_info {
+ extern struct ucode_cpu_info ucode_cpu_info[];
+
+ #ifdef CONFIG_MICROCODE_INTEL
+-extern struct microcode_ops * __init init_intel_microcode(void);
++extern const struct microcode_ops * __init init_intel_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_intel_microcode(void)
++static inline const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return NULL;
+ }
+ #endif /* CONFIG_MICROCODE_INTEL */
+
+ #ifdef CONFIG_MICROCODE_AMD
+-extern struct microcode_ops * __init init_amd_microcode(void);
++extern const struct microcode_ops * __init init_amd_microcode(void);
+ #else
+-static inline struct microcode_ops * __init init_amd_microcode(void)
++static inline const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return NULL;
+ }
+diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
+index 593e51d..fa69c9a 100644
+--- a/arch/x86/include/asm/mman.h
++++ b/arch/x86/include/asm/mman.h
+@@ -5,4 +5,14 @@
+
+ #include <asm-generic/mman.h>
+
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_32
++#define arch_mmap_check i386_mmap_check
++int i386_mmap_check(unsigned long addr, unsigned long len,
++ unsigned long flags);
++#endif
++#endif
++#endif
++
+ #endif /* _ASM_X86_MMAN_H */
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 80a1dee..239c67d 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,10 +9,23 @@
+ * we put the segment information here.
+ */
+ typedef struct {
+- void *ldt;
++ struct desc_struct *ldt;
+ int size;
+ struct mutex lock;
+- void *vdso;
++ unsigned long vdso;
++
++#ifdef CONFIG_X86_32
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++#endif
++
+ } mm_context_t;
+
+ #ifdef CONFIG_SMP
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 8b5393e..b59dac9 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
+
+ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+ {
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++ unsigned int i;
++ pgd_t *pgd;
++
++ pax_open_kernel();
++ pgd = get_cpu_pgd(smp_processor_id());
++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
++ set_pgd_batched(pgd+i, native_make_pgd(0));
++ pax_close_kernel();
++#endif
++
+ #ifdef CONFIG_SMP
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+ {
+ unsigned cpu = smp_processor_id();
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
++ int tlbstate = TLBSTATE_OK;
++#endif
+
+ if (likely(prev != next)) {
+ #ifdef CONFIG_SMP
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ tlbstate = percpu_read(cpu_tlbstate.state);
++#endif
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
+ #endif
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+ /* Re-load page tables */
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#else
+ load_cr3(next->pgd);
++#endif
+
+ /* stop flush ipis for the previous mm */
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context);
+- }
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ if (!nx_enabled) {
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ smp_mb__after_clear_bit();
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++ }
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
++ prev->context.user_cs_limit != next->context.user_cs_limit))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
+ #ifdef CONFIG_SMP
++ else if (unlikely(tlbstate != TLBSTATE_OK))
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++#endif
++
++ }
+ else {
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++ pax_open_kernel();
++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
++ pax_close_kernel();
++ load_cr3(get_cpu_pgd(cpu));
++#endif
++
++#ifdef CONFIG_SMP
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ * tlb flush IPI delivery. We must reload CR3
+ * to make sure to use no freed page tables.
+ */
++
++#ifndef CONFIG_PAX_PER_CPU_PGD
+ load_cr3(next->pgd);
++#endif
++
+ load_LDT_nolock(&next->context);
++
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
++ if (!nx_enabled)
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
++#endif
++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
++#endif
++
+ }
++#endif
+ }
+-#endif
+ }
+
+ #define activate_mm(prev, next) \
+diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+index 3e2ce58..caaf478 100644
+--- a/arch/x86/include/asm/module.h
++++ b/arch/x86/include/asm/module.h
+@@ -5,6 +5,7 @@
+
+ #ifdef CONFIG_X86_64
+ /* X86_64 does not define MODULE_PROC_FAMILY */
++#define MODULE_PROC_FAMILY ""
+ #elif defined CONFIG_M386
+ #define MODULE_PROC_FAMILY "386 "
+ #elif defined CONFIG_M486
+@@ -59,13 +60,26 @@
+ #error unknown processor family
+ #endif
+
+-#ifdef CONFIG_X86_32
+-# ifdef CONFIG_4KSTACKS
+-# define MODULE_STACKSIZE "4KSTACKS "
+-# else
+-# define MODULE_STACKSIZE ""
+-# endif
+-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
++#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
++#define MODULE_STACKSIZE "4KSTACKS "
++#else
++#define MODULE_STACKSIZE ""
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
++#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
++#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
++#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
++#else
++#define MODULE_PAX_KERNEXEC ""
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define MODULE_PAX_UDEREF "UDEREF "
++#else
++#define MODULE_PAX_UDEREF ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
++
+ #endif /* _ASM_X86_MODULE_H */
+diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
+index 7639dbf..e08a58c 100644
+--- a/arch/x86/include/asm/page_64_types.h
++++ b/arch/x86/include/asm/page_64_types.h
+@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
+
+ /* duplicated to the one in bootmem.h */
+ extern unsigned long max_pfn;
+-extern unsigned long phys_base;
++extern const unsigned long phys_base;
+
+ extern unsigned long __phys_addr(unsigned long);
+ #define __phys_reloc_hide(x) (x)
+diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
+index efb3899..ef30687 100644
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+ val);
+ }
+
++static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
++ pgdval_t val = native_pgd_val(pgd);
++
++ if (sizeof(pgdval_t) > sizeof(long))
++ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
++ val, (u64)val >> 32);
++ else
++ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
++ val);
++}
++
+ static inline void pgd_clear(pgd_t *pgdp)
+ {
+ set_pgd(pgdp, __pgd(0));
+@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
+ pv_mmu_ops.set_fixmap(idx, phys, flags);
+ }
+
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long pax_open_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
++}
++
++static inline unsigned long pax_close_kernel(void)
++{
++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
++}
++#else
++static inline unsigned long pax_open_kernel(void) { return 0; }
++static inline unsigned long pax_close_kernel(void) { return 0; }
++#endif
++
+ #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+
+ static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
+@@ -945,7 +972,7 @@ extern void default_banner(void);
+
+ #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
+ #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+-#define PARA_INDIRECT(addr) *%cs:addr
++#define PARA_INDIRECT(addr) *%ss:addr
+ #endif
+
+ #define INTERRUPT_RETURN \
+@@ -1022,6 +1049,21 @@ extern void default_banner(void);
+ PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
+ CLBR_NONE, \
+ jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
++
++#define GET_CR0_INTO_RDI \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR0 \
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++
++#define GET_CR3_INTO_RDI \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
++ mov %rax,%rdi
++
++#define SET_RDI_INTO_CR3 \
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
++
+ #endif /* CONFIG_X86_32 */
+
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
+index 9357473..12e6413 100644
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -78,7 +78,7 @@ struct pv_init_ops {
+ */
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
+-};
++} __no_const;
+
+
+ struct pv_lazy_ops {
+@@ -90,7 +90,7 @@ struct pv_lazy_ops {
+ struct pv_time_ops {
+ unsigned long long (*sched_clock)(void);
+ unsigned long (*get_tsc_khz)(void);
+-};
++} __no_const;
+
+ struct pv_cpu_ops {
+ /* hooks for various privileged instructions */
+@@ -186,7 +186,7 @@ struct pv_cpu_ops {
+
+ void (*start_context_switch)(struct task_struct *prev);
+ void (*end_context_switch)(struct task_struct *next);
+-};
++} __no_const;
+
+ struct pv_irq_ops {
+ /*
+@@ -217,7 +217,7 @@ struct pv_apic_ops {
+ unsigned long start_eip,
+ unsigned long start_esp);
+ #endif
+-};
++} __no_const;
+
+ struct pv_mmu_ops {
+ unsigned long (*read_cr2)(void);
+@@ -301,6 +301,7 @@ struct pv_mmu_ops {
+ struct paravirt_callee_save make_pud;
+
+ void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
++ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
+ #endif /* PAGETABLE_LEVELS == 4 */
+ #endif /* PAGETABLE_LEVELS >= 3 */
+
+@@ -316,6 +317,12 @@ struct pv_mmu_ops {
+ an mfn. We can tell which is which from the index. */
+ void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
+ phys_addr_t phys, pgprot_t flags);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long (*pax_open_kernel)(void);
++ unsigned long (*pax_close_kernel)(void);
++#endif
++
+ };
+
+ struct raw_spinlock;
+@@ -326,7 +333,7 @@ struct pv_lock_ops {
+ void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
+ int (*spin_trylock)(struct raw_spinlock *lock);
+ void (*spin_unlock)(struct raw_spinlock *lock);
+-};
++} __no_const;
+
+ /* This contains all the paravirt structures: we get a convenient
+ * number for each function using the offset which we use to indicate
+diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
+index b399988..3f47c38 100644
+--- a/arch/x86/include/asm/pci_x86.h
++++ b/arch/x86/include/asm/pci_x86.h
+@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
+ extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+
+ struct pci_raw_ops {
+- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 *val);
+- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
++ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int reg, int len, u32 val);
+ };
+
+-extern struct pci_raw_ops *raw_pci_ops;
+-extern struct pci_raw_ops *raw_pci_ext_ops;
++extern const struct pci_raw_ops *raw_pci_ops;
++extern const struct pci_raw_ops *raw_pci_ext_ops;
+
+-extern struct pci_raw_ops pci_direct_conf1;
++extern const struct pci_raw_ops pci_direct_conf1;
+ extern bool port_cf9_safe;
+
+ /* arch_initcall level */
+diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
+index b65a36d..50345a4 100644
+--- a/arch/x86/include/asm/percpu.h
++++ b/arch/x86/include/asm/percpu.h
+@@ -78,6 +78,7 @@ do { \
+ if (0) { \
+ T__ tmp__; \
+ tmp__ = (val); \
++ (void)tmp__; \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
+index 271de94..c714995 100644
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+ {
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
++}
++
++static inline void pmd_populate_user(struct mm_struct *mm,
++ pmd_t *pmd, pte_t *pte)
++{
++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ }
+
+@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+
+ #ifdef CONFIG_X86_PAE
+ extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
++{
++ pud_populate(mm, pudp, pmd);
++}
+ #else /* !CONFIG_X86_PAE */
+ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ {
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ }
++
++static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
++}
+ #endif /* CONFIG_X86_PAE */
+
+ #if PAGETABLE_LEVELS > 3
+@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+ }
+
++static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
++}
++
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+ {
+ return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
+index 2334982..70bc412 100644
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
+index 33927d2..ccde329 100644
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
++ pax_close_kernel();
+ }
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
++ pax_close_kernel();
+ }
+
+ /*
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 1cce9d2..a7c3e4d 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
+
+ #ifndef __PAGETABLE_PUD_FOLDED
+ #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
++#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
+ #define pgd_clear(pgd) native_pgd_clear(pgd)
+ #endif
+
+@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
+
+ #define arch_end_context_switch(prev) do {} while(0)
+
++#define pax_open_kernel() native_pax_open_kernel()
++#define pax_close_kernel() native_pax_close_kernel()
+ #endif /* CONFIG_PARAVIRT */
+
++#define __HAVE_ARCH_PAX_OPEN_KERNEL
++#define __HAVE_ARCH_PAX_CLOSE_KERNEL
++
++#ifdef CONFIG_PAX_KERNEXEC
++static inline unsigned long native_pax_open_kernel(void)
++{
++ unsigned long cr0;
++
++ preempt_disable();
++ barrier();
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(cr0 & X86_CR0_WP);
++ write_cr0(cr0);
++ return cr0 ^ X86_CR0_WP;
++}
++
++static inline unsigned long native_pax_close_kernel(void)
++{
++ unsigned long cr0;
++
++ cr0 = read_cr0() ^ X86_CR0_WP;
++ BUG_ON(!(cr0 & X86_CR0_WP));
++ write_cr0(cr0);
++ barrier();
++ preempt_enable_no_resched();
++ return cr0 ^ X86_CR0_WP;
++}
++#else
++static inline unsigned long native_pax_open_kernel(void) { return 0; }
++static inline unsigned long native_pax_close_kernel(void) { return 0; }
++#endif
++
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
++static inline int pte_user(pte_t pte)
++{
++ return pte_val(pte) & _PAGE_USER;
++}
++
+ static inline int pte_dirty(pte_t pte)
+ {
+ return pte_flags(pte) & _PAGE_DIRTY;
+@@ -139,8 +179,7 @@ static inline unsigned long pud_pfn(pud_t pud)
+
+ static inline int pmd_large(pmd_t pte)
+ {
+- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+- (_PAGE_PSE | _PAGE_PRESENT);
++ return pmd_flags(pte) & _PAGE_PSE;
+ }
+
+ static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+@@ -172,9 +211,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
+ return pte_clear_flags(pte, _PAGE_RW);
+ }
+
++static inline pte_t pte_mkread(pte_t pte)
++{
++ return __pte(pte_val(pte) | _PAGE_USER);
++}
++
+ static inline pte_t pte_mkexec(pte_t pte)
+ {
+- return pte_clear_flags(pte, _PAGE_NX);
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_clear_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_set_flags(pte, _PAGE_USER);
++}
++
++static inline pte_t pte_exprotect(pte_t pte)
++{
++#ifdef CONFIG_X86_PAE
++ if (__supported_pte_mask & _PAGE_NX)
++ return pte_set_flags(pte, _PAGE_NX);
++ else
++#endif
++ return pte_clear_flags(pte, _PAGE_USER);
+ }
+
+ static inline pte_t pte_mkdirty(pte_t pte)
+@@ -307,6 +366,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
+ #endif
+
+ #ifndef __ASSEMBLY__
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
++static inline pgd_t *get_cpu_pgd(unsigned int cpu)
++{
++ return cpu_pgd[cpu];
++}
++#endif
++
+ #include <linux/mm_types.h>
+
+ static inline int pte_none(pte_t pte)
+@@ -332,7 +400,13 @@ static inline int pte_hidden(pte_t pte)
+
+ static inline int pmd_present(pmd_t pmd)
+ {
+- return pmd_flags(pmd) & _PAGE_PRESENT;
++ /*
++ * Checking for _PAGE_PSE is needed too because
++ * split_huge_page will temporarily clear the present bit (but
++ * the _PAGE_PSE flag will remain set at all times while the
++ * _PAGE_PRESENT bit is clear).
++ */
++ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
+ }
+
+ static inline int pmd_none(pmd_t pmd)
+@@ -477,7 +551,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+
+ static inline int pgd_bad(pgd_t pgd)
+ {
+- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
+ }
+
+ static inline int pgd_none(pgd_t pgd)
+@@ -500,7 +574,12 @@ static inline int pgd_none(pgd_t pgd)
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
++
++#ifdef CONFIG_PAX_PER_CPU_PGD
++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
++#endif
++
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+@@ -511,6 +590,20 @@ static inline int pgd_none(pgd_t pgd)
+ #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
++#ifdef CONFIG_X86_32
++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
++#else
++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
++#else
++#define PAX_USER_SHADOW_BASE (_AC(0,UL))
++#endif
++
++#endif
++
+ #ifndef __ASSEMBLY__
+
+ extern int direct_gbpages;
+@@ -616,11 +709,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
+ {
+- memcpy(dst, src, count * sizeof(pgd_t));
++ pax_open_kernel();
++ while (count--)
++ *dst++ = *src++;
++ pax_close_kernel();
+ }
+
++#ifdef CONFIG_PAX_PER_CPU_PGD
++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
++#endif
++
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
++#else
++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
++#endif
+
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index 750f1bf..971e8394 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -26,9 +26,6 @@
+ struct mm_struct;
+ struct vm_area_struct;
+
+-extern pgd_t swapper_pg_dir[1024];
+-extern pgd_t trampoline_pg_dir[1024];
+-
+ static inline void pgtable_cache_init(void) { }
+ static inline void check_pgt_cache(void) { }
+ void paging_init(void);
+@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ # include <asm/pgtable-2level.h>
+ #endif
+
++extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
++extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
++#ifdef CONFIG_X86_PAE
++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
++#endif
++
+ #if defined(CONFIG_HIGHPTE)
+ #define __KM_PTE \
+ (in_nmi() ? KM_NMI_PTE : \
+@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
+ /* Clear a kernel PTE and flush it from the TLB */
+ #define kpte_clear_flush(ptep, vaddr) \
+ do { \
++ pax_open_kernel(); \
+ pte_clear(&init_mm, (vaddr), (ptep)); \
++ pax_close_kernel(); \
+ __flush_tlb_one((vaddr)); \
+ } while (0)
+
+@@ -85,6 +90,9 @@ do { \
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
++
+ /*
+ * kern_addr_valid() is (1) for FLATMEM and (0) for
+ * SPARSEMEM and DISCONTIGMEM
+diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
+index 5e67c15..12d5c47 100644
+--- a/arch/x86/include/asm/pgtable_32_types.h
++++ b/arch/x86/include/asm/pgtable_32_types.h
+@@ -8,7 +8,7 @@
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level_types.h>
+-# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE - 1))
+ #else
+ # include <asm/pgtable-2level_types.h>
+@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
+ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifndef __ASSEMBLY__
++extern unsigned char MODULES_EXEC_VADDR[];
++extern unsigned char MODULES_EXEC_END[];
++#endif
++#include <asm/boot.h>
++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
++#else
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
++#endif
++
+ #define MODULES_VADDR VMALLOC_START
+ #define MODULES_END VMALLOC_END
+ #define MODULES_LEN (MODULES_VADDR - MODULES_END)
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index c57a301..5038210 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -16,10 +16,14 @@
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
++extern pud_t level3_vmalloc_start_pgt[512];
++extern pud_t level3_vmalloc_end_pgt[512];
++extern pud_t level3_vmemmap_pgt[512];
++extern pud_t level2_vmemmap_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+-extern pmd_t level2_ident_pgt[512];
+-extern pgd_t init_level4_pgt[];
++extern pmd_t level2_ident_pgt[512*2];
++extern pgd_t init_level4_pgt[512];
+
+ #define swapper_pg_dir init_level4_pgt
+
+@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
+
+ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
+ {
++ pax_open_kernel();
+ *pmdp = pmd;
++ pax_close_kernel();
+ }
+
+ static inline void native_pmd_clear(pmd_t *pmd)
+@@ -84,7 +90,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
+
+ static inline void native_set_pud(pud_t *pudp, pud_t pud)
+ {
++ pax_open_kernel();
+ *pudp = pud;
++ pax_close_kernel();
+ }
+
+ static inline void native_pud_clear(pud_t *pud)
+@@ -94,6 +102,13 @@ static inline void native_pud_clear(pud_t *pud)
+
+ static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+ {
++ pax_open_kernel();
++ *pgdp = pgd;
++ pax_close_kernel();
++}
++
++static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
++{
+ *pgdp = pgd;
+ }
+
+diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
+index 766ea16..5b96cb3 100644
+--- a/arch/x86/include/asm/pgtable_64_types.h
++++ b/arch/x86/include/asm/pgtable_64_types.h
+@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
+ #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+ #define MODULES_END _AC(0xffffffffff000000, UL)
+ #define MODULES_LEN (MODULES_END - MODULES_VADDR)
++#define MODULES_EXEC_VADDR MODULES_VADDR
++#define MODULES_EXEC_END MODULES_END
++
++#define ktla_ktva(addr) (addr)
++#define ktva_ktla(addr) (addr)
+
+ #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index d1f4a76..2f46ba1 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -16,12 +16,11 @@
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+ #define _PAGE_BIT_PAT 7 /* on 4KB pages */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
+ #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+ #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
+ #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
+ #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+ /* If _PAGE_BIT_PRESENT is clear, we use these: */
+@@ -39,7 +38,6 @@
+ #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+ #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+ #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+ #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+ #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+ #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+@@ -55,8 +53,10 @@
+
+ #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+ #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+-#else
++#elif defined(CONFIG_KMEMCHECK)
+ #define _PAGE_NX (_AT(pteval_t, 0))
++#else
++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+ #endif
+
+ #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+@@ -93,6 +93,9 @@
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+ _PAGE_ACCESSED)
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+ #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+@@ -103,8 +106,8 @@
+ #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+ #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+@@ -163,8 +166,8 @@
+ * bits are combined, this will alow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+ #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+ #endif
+
+@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
+ {
+ return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+ }
++#endif
+
++#if PAGETABLE_LEVELS == 3
++#include <asm-generic/pgtable-nopud.h>
++#endif
++
++#if PAGETABLE_LEVELS == 2
++#include <asm-generic/pgtable-nopmd.h>
++#endif
++
++#ifndef __ASSEMBLY__
+ #if PAGETABLE_LEVELS > 3
+ typedef struct { pudval_t pud; } pud_t;
+
+@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
+ return pud.pud;
+ }
+ #else
+-#include <asm-generic/pgtable-nopud.h>
+-
+ static inline pudval_t native_pud_val(pud_t pud)
+ {
+ return native_pgd_val(pud.pgd);
+@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ return pmd.pmd;
+ }
+ #else
+-#include <asm-generic/pgtable-nopmd.h>
+-
+ static inline pmdval_t native_pmd_val(pmd_t pmd)
+ {
+ return native_pgd_val(pmd.pud.pgd);
+@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
+
+ extern pteval_t __supported_pte_mask;
+ extern void set_nx(void);
++
++#ifdef CONFIG_X86_32
++#ifdef CONFIG_X86_PAE
+ extern int nx_enabled;
++#else
++#define nx_enabled (0)
++#endif
++#else
++#define nx_enabled (1)
++#endif
+
+ #define pgprot_writecombine pgprot_writecombine
+ extern pgprot_t pgprot_writecombine(pgprot_t prot);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index fa04dea..5f823fc 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -272,7 +272,7 @@ struct tss_struct {
+
+ } ____cacheline_aligned;
+
+-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
++extern struct tss_struct init_tss[NR_CPUS];
+
+ /*
+ * Save the original ist values for checking stack pointers during debugging
+@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define TASK_SIZE PAGE_OFFSET
+ #define TASK_SIZE_MAX TASK_SIZE
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
++#else
+ #define STACK_TOP TASK_SIZE
+-#define STACK_TOP_MAX STACK_TOP
++#endif
++
++#define STACK_TOP_MAX TASK_SIZE
+
+ #define INIT_THREAD { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
+ */
+ #define INIT_TSS { \
+ .x86_tss = { \
+- .sp0 = sizeof(init_stack) + (long)&init_stack, \
++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
+
+ /*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
+ __regs__ - 1; \
+ })
+
+@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ /*
+ * User space process size. 47bits minus one guard page.
+ */
+-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
+- 0xc0000000 : 0xFFFFe000)
++ 0xc0000000 : 0xFFFFf000)
+
+ #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
+ IA32_PAGE_OFFSET : TASK_SIZE_MAX)
+@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ #define STACK_TOP_MAX TASK_SIZE_MAX
+
+ #define INIT_THREAD { \
+- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ #define INIT_TSS { \
+- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
+ }
+
+ /*
+@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
++#endif
++
+ #define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
+ /* Get/set a process' ability to use the timestamp counter instruction */
+diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
+index 621f56d..f1094fd 100644
+--- a/arch/x86/include/asm/proto.h
++++ b/arch/x86/include/asm/proto.h
+@@ -22,14 +22,4 @@ extern int reboot_force;
+
+ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
+
+-/*
+- * This looks more complex than it should be. But we need to
+- * get the type for the ~ right in round_down (it needs to be
+- * as wide as the result!), and we want to evaluate the macro
+- * arguments just once each.
+- */
+-#define __round_mask(x,y) ((__typeof__(x))((y)-1))
+-#define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1)
+-#define round_down(x,y) ((x) & ~__round_mask(x,y))
+-
+ #endif /* _ASM_X86_PROTO_H */
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index e668d72..c4dd168 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -2,7 +2,6 @@
+ #define _ASM_X86_PTRACE_H
+
+ #include <linux/compiler.h> /* For __user */
+-#include <linux/linkage.h> /* For asmregparm */
+ #include <asm/ptrace-abi.h>
+ #include <asm/processor-flags.h>
+
+@@ -143,37 +142,35 @@ extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+ int error_code, int si_code);
+ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
+
+-extern asmregparm long syscall_trace_enter(struct pt_regs *);
+-extern asmregparm void syscall_trace_leave(struct pt_regs *);
+-
+ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ {
+ return regs->ax;
+ }
+
+ /*
+- * user_mode_vm(regs) determines whether a register set came from user mode.
++ * user_mode(regs) determines whether a register set came from user mode.
+ * This is true if V8086 mode was enabled OR if the register set was from
+ * protected mode with RPL-3 CS value. This tricky test checks that with
+ * one comparison. Many places in the kernel can bypass this full check
+- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
++ * be used.
+ */
+-static inline int user_mode(struct pt_regs *regs)
++static inline int user_mode_novm(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+ #else
+- return !!(regs->cs & 3);
++ return !!(regs->cs & SEGMENT_RPL_MASK);
+ #endif
+ }
+
+-static inline int user_mode_vm(struct pt_regs *regs)
++static inline int user_mode(struct pt_regs *regs)
+ {
+ #ifdef CONFIG_X86_32
+ return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
+ USER_RPL;
+ #else
+- return user_mode(regs);
++ return user_mode_novm(regs);
+ #endif
+ }
+
+diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
+index 562d4fd..1e42a5b 100644
+--- a/arch/x86/include/asm/reboot.h
++++ b/arch/x86/include/asm/reboot.h
+@@ -6,19 +6,19 @@
+ struct pt_regs;
+
+ struct machine_ops {
+- void (*restart)(char *cmd);
+- void (*halt)(void);
+- void (*power_off)(void);
++ void (* __noreturn restart)(char *cmd);
++ void (* __noreturn halt)(void);
++ void (* __noreturn power_off)(void);
+ void (*shutdown)(void);
+ void (*crash_shutdown)(struct pt_regs *);
+- void (*emergency_restart)(void);
+-};
++ void (* __noreturn emergency_restart)(void);
++} __no_const;
+
+ extern struct machine_ops machine_ops;
+
+ void native_machine_crash_shutdown(struct pt_regs *regs);
+ void native_machine_shutdown(void);
+-void machine_real_restart(const unsigned char *code, int length);
++void __noreturn machine_real_restart(const unsigned char *code, unsigned int length);
+
+ typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+ void nmi_shootdown_cpus(nmi_shootdown_cb callback);
+diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
+index 606ede1..dbfff37 100644
+--- a/arch/x86/include/asm/rwsem.h
++++ b/arch/x86/include/asm/rwsem.h
+@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning down_read\n\t"
+ LOCK_PREFIX _ASM_INC "(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* adds 0x00000001, returns the old value */
+ " jns 1f\n"
+ " call call_rwsem_down_read_failed\n"
+@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
+ "1:\n\t"
+ " mov %1,%2\n\t"
+ " add %3,%2\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "sub %3,%2\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ " jle 2f\n\t"
+ LOCK_PREFIX " cmpxchg %2,%0\n\t"
+ " jnz 1b\n\t"
+@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+ tmp = RWSEM_ACTIVE_WRITE_BIAS;
+ asm volatile("# beginning down_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtract 0x0000ffff, returns the old value */
+ " test %1,%1\n\t"
+ /* was the count 0 before? */
+@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
+ rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
+ asm volatile("# beginning __up_read\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* subtracts 1, returns the old value */
+ " jns 1f\n\t"
+ " call call_rwsem_wake\n"
+@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
+ rwsem_count_t tmp;
+ asm volatile("# beginning __up_write\n\t"
+ LOCK_PREFIX " xadd %1,(%2)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %1,(%2)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /* tries to transition
+ 0xffff0001 -> 0x00000000 */
+ " jz 1f\n"
+@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ {
+ asm volatile("# beginning __downgrade_write\n\t"
+ LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ /*
+ * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+ * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ static inline void rwsem_atomic_add(rwsem_count_t delta,
+ struct rw_semaphore *sem)
+ {
+- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX _ASM_SUB "%1,%0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (sem->count)
+ : "er" (delta));
+ }
+@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+ {
+ rwsem_count_t tmp = delta;
+
+- asm volatile(LOCK_PREFIX "xadd %0,%1"
++ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ "mov %0,%1\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+r" (tmp), "+m" (sem->count)
+ : : "memory");
+
+diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
+index 14e0ed8..7f7dd5e 100644
+--- a/arch/x86/include/asm/segment.h
++++ b/arch/x86/include/asm/segment.h
+@@ -62,10 +62,15 @@
+ * 26 - ESPFIX small SS
+ * 27 - per-cpu [ offset to per-cpu data area ]
+ * 28 - stack_canary-20 [ for stack protector ]
+- * 29 - unused
+- * 30 - unused
++ * 29 - PCI BIOS CS
++ * 30 - PCI BIOS DS
+ * 31 - TSS for double fault handler
+ */
++#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
++#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
++#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
++#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
++
+ #define GDT_ENTRY_TLS_MIN 6
+ #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+@@ -77,6 +82,8 @@
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
++
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
+@@ -88,7 +95,7 @@
+ #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
+ #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
+
+-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
+ #ifdef CONFIG_SMP
+ #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
+ #else
+@@ -102,6 +109,12 @@
+ #define __KERNEL_STACK_CANARY 0
+ #endif
+
++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
++
++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
++
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+@@ -139,7 +152,7 @@
+ */
+
+ /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
+-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
+
+
+ #else
+@@ -163,6 +176,8 @@
+ #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
+ #define __USER32_DS __USER_DS
+
++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
++
+ #define GDT_ENTRY_TSS 8 /* needs two entries */
+ #define GDT_ENTRY_LDT 10 /* needs two entries */
+ #define GDT_ENTRY_TLS_MIN 12
+@@ -183,6 +198,7 @@
+ #endif
+
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
+diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
+index 4c2f63c..5685db2 100644
+--- a/arch/x86/include/asm/smp.h
++++ b/arch/x86/include/asm/smp.h
+@@ -24,7 +24,7 @@ extern unsigned int num_processors;
+ DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+ DECLARE_PER_CPU(u16, cpu_llc_id);
+-DECLARE_PER_CPU(int, cpu_number);
++DECLARE_PER_CPU(unsigned int, cpu_number);
+
+ static inline struct cpumask *cpu_sibling_mask(int cpu)
+ {
+@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
+ DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+ /* Static state in head.S used to set up a CPU */
+-extern struct {
+- void *sp;
+- unsigned short ss;
+-} stack_start;
++extern unsigned long stack_start; /* Initial stack pointer address */
+
+ struct smp_ops {
+ void (*smp_prepare_boot_cpu)(void);
+@@ -60,7 +57,7 @@ struct smp_ops {
+
+ void (*send_call_func_ipi)(const struct cpumask *mask);
+ void (*send_call_func_single_ipi)(int cpu);
+-};
++} __no_const;
+
+ /* Globals due to paravirt */
+ extern void set_cpu_sibling_map(int cpu);
+@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
+ extern int safe_smp_processor_id(void);
+
+ #elif defined(CONFIG_X86_64_SMP)
+-#define raw_smp_processor_id() (percpu_read(cpu_number))
+-
+-#define stack_smp_processor_id() \
+-({ \
+- struct thread_info *ti; \
+- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
+- ti->cpu; \
+-})
++#define raw_smp_processor_id() (percpu_read(cpu_number))
++#define stack_smp_processor_id() raw_smp_processor_id()
+ #define safe_smp_processor_id() smp_processor_id()
+
+ #endif
+diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
+index 4e77853..4359783 100644
+--- a/arch/x86/include/asm/spinlock.h
++++ b/arch/x86/include/asm/spinlock.h
+@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+ static inline void __raw_read_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX " addl $1,(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jns 1f\n"
+ "call __read_lock_failed\n\t"
+ "1:\n"
+@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
+ static inline void __raw_write_lock(raw_rwlock_t *rw)
+ {
+ asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX " addl %1,(%0)\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ "jz 1f\n"
+ "call __write_lock_failed\n\t"
+ "1:\n"
+@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
+
+ static inline void __raw_read_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
++ asm volatile(LOCK_PREFIX "incl %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "decl %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
++ :"+m" (rw->lock) : : "memory");
+ }
+
+ static inline void __raw_write_unlock(raw_rwlock_t *rw)
+ {
+- asm volatile(LOCK_PREFIX "addl %1, %0"
++ asm volatile(LOCK_PREFIX "addl %1, %0\n"
++
++#ifdef CONFIG_PAX_REFCOUNT
++ "jno 0f\n"
++ LOCK_PREFIX "subl %1, %0\n"
++ "int $4\n0:\n"
++ _ASM_EXTABLE(0b, 0b)
++#endif
++
+ : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
+ }
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 1575177..cb23f52 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -48,7 +48,7 @@
+ * head_32 for boot CPU and setup_per_cpu_areas() for others.
+ */
+ #define GDT_STACK_CANARY_INIT \
+- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
++ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
+
+ /*
+ * Initialize the stackprotector canary value.
+@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
+
+ static inline void load_stack_canary_segment(void)
+ {
+-#ifdef CONFIG_X86_32
++#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
+ asm volatile ("mov %0, %%gs" : : "r" (0));
+ #endif
+ }
+diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
+index e0fbf29..858ef4a 100644
+--- a/arch/x86/include/asm/system.h
++++ b/arch/x86/include/asm/system.h
+@@ -132,7 +132,7 @@ do { \
+ "thread_return:\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
+ __switch_canary \
+- "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
+ "movq %%rax,%%rdi\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "jnz ret_from_fork\n\t" \
+@@ -143,7 +143,7 @@ do { \
+ [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
+ [ti_flags] "i" (offsetof(struct thread_info, flags)), \
+ [_tif_fork] "i" (_TIF_FORK), \
+- [thread_info] "i" (offsetof(struct task_struct, stack)), \
++ [thread_info] "m" (per_cpu_var(current_tinfo)), \
+ [current_task] "m" (per_cpu_var(current_task)) \
+ __switch_canary_iparam \
+ : "memory", "cc" __EXTRA_CLOBBER)
+@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
+ {
+ unsigned long __limit;
+ asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
+- return __limit + 1;
++ return __limit;
+ }
+
+ static inline void native_clts(void)
+@@ -340,12 +340,12 @@ void enable_hlt(void);
+
+ void cpu_idle_wait(void);
+
+-extern unsigned long arch_align_stack(unsigned long sp);
++#define arch_align_stack(x) ((x) & ~0xfUL)
+ extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+ void default_idle(void);
+
+-void stop_this_cpu(void *dummy);
++void stop_this_cpu(void *dummy) __noreturn;
+
+ /*
+ * Force strict CPU ordering.
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 19c3ce4..4ad5ba4 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
++#include <asm/percpu.h>
+
+ /*
+ * low level task data that entry.S needs immediate access to
+@@ -24,7 +25,6 @@ struct exec_domain;
+ #include <asm/atomic.h>
+
+ struct thread_info {
+- struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 flags; /* low level flags */
+ __u32 status; /* thread synchronous flags */
+@@ -34,18 +34,12 @@ struct thread_info {
+ mm_segment_t addr_limit;
+ struct restart_block restart_block;
+ void __user *sysenter_return;
+-#ifdef CONFIG_X86_32
+- unsigned long previous_esp; /* ESP of the previous stack in
+- case of nested (IRQ) stacks
+- */
+- __u8 supervisor_stack[0];
+-#endif
++ unsigned long lowest_stack;
+ int uaccess_err;
+ };
+
+-#define INIT_THREAD_INFO(tsk) \
++#define INIT_THREAD_INFO \
+ { \
+- .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+@@ -56,7 +50,7 @@ struct thread_info {
+ }, \
+ }
+
+-#define init_thread_info (init_thread_union.thread_info)
++#define init_thread_info (init_thread_union.stack)
+ #define init_stack (init_thread_union.stack)
+
+ #else /* !__ASSEMBLY__ */
+@@ -95,6 +89,7 @@ struct thread_info {
+ #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */
+ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
+ #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
++#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
+
+ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+@@ -117,16 +112,17 @@ struct thread_info {
+ #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR)
+ #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
++#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_enter() */
+ #define _TIF_WORK_SYSCALL_ENTRY \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
+- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do in syscall_trace_leave() */
+ #define _TIF_WORK_SYSCALL_EXIT \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
+- _TIF_SYSCALL_TRACEPOINT)
++ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
+
+ /* work to do on interrupt/exception return */
+ #define _TIF_WORK_MASK \
+@@ -136,7 +132,8 @@ struct thread_info {
+
+ /* work to do on any return to user space */
+ #define _TIF_ALLWORK_MASK \
+- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
++ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
++ _TIF_GRSEC_SETXID)
+
+ /* Only used for 64 bit */
+ #define _TIF_DO_NOTIFY_MASK \
+@@ -163,45 +160,40 @@ struct thread_info {
+ #define alloc_thread_info(tsk) \
+ ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
+
+-#ifdef CONFIG_X86_32
+-
+-#define STACK_WARN (THREAD_SIZE/8)
+-/*
+- * macros/functions for gaining access to the thread information structure
+- *
+- * preempt_count needs to be 1 initially, until the scheduler is functional.
+- */
+-#ifndef __ASSEMBLY__
+-
+-
+-/* how to get the current stack pointer from C */
+-register unsigned long current_stack_pointer asm("esp") __used;
+-
+-/* how to get the thread information struct from C */
+-static inline struct thread_info *current_thread_info(void)
+-{
+- return (struct thread_info *)
+- (current_stack_pointer & ~(THREAD_SIZE - 1));
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
++#ifdef __ASSEMBLY__
+ /* how to get the thread information struct from ASM */
+ #define GET_THREAD_INFO(reg) \
+- movl $-THREAD_SIZE, reg; \
+- andl %esp, reg
++ mov PER_CPU_VAR(current_tinfo), reg
+
+ /* use this one if reg already contains %esp */
+-#define GET_THREAD_INFO_WITH_ESP(reg) \
+- andl $-THREAD_SIZE, reg
++#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
++#else
++/* how to get the thread information struct from C */
++DECLARE_PER_CPU(struct thread_info *, current_tinfo);
++
++static __always_inline struct thread_info *current_thread_info(void)
++{
++ return percpu_read_stable(current_tinfo);
++}
++#endif
++
++#ifdef CONFIG_X86_32
++
++#define STACK_WARN (THREAD_SIZE/8)
++/*
++ * macros/functions for gaining access to the thread information structure
++ *
++ * preempt_count needs to be 1 initially, until the scheduler is functional.
++ */
++#ifndef __ASSEMBLY__
++
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("esp") __used;
+
+ #endif
+
+ #else /* X86_32 */
+
+-#include <asm/percpu.h>
+-#define KERNEL_STACK_OFFSET (5*8)
+-
+ /*
+ * macros/functions for gaining access to the thread information structure
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+@@ -209,21 +201,8 @@ static inline struct thread_info *current_thread_info(void)
+ #ifndef __ASSEMBLY__
+ DECLARE_PER_CPU(unsigned long, kernel_stack);
+
+-static inline struct thread_info *current_thread_info(void)
+-{
+- struct thread_info *ti;
+- ti = (void *)(percpu_read_stable(kernel_stack) +
+- KERNEL_STACK_OFFSET - THREAD_SIZE);
+- return ti;
+-}
+-
+-#else /* !__ASSEMBLY__ */
+-
+-/* how to get the thread information struct from ASM */
+-#define GET_THREAD_INFO(reg) \
+- movq PER_CPU_VAR(kernel_stack),reg ; \
+- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
+-
++/* how to get the current stack pointer from C */
++register unsigned long current_stack_pointer asm("rsp") __used;
+ #endif
+
+ #endif /* !X86_32 */
+@@ -260,5 +239,16 @@ extern void arch_task_cache_init(void);
+ extern void free_thread_info(struct thread_info *ti);
+ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+ #define arch_task_cache_init arch_task_cache_init
++
++#define __HAVE_THREAD_FUNCTIONS
++#define task_thread_info(task) (&(task)->tinfo)
++#define task_stack_page(task) ((task)->stack)
++#define setup_thread_stack(p, org) do {} while (0)
++#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
++
++#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
++extern struct task_struct *alloc_task_struct(void);
++extern void free_task_struct(struct task_struct *);
++
+ #endif
+ #endif /* _ASM_X86_THREAD_INFO_H */
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index 61c5874..28d8b16 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -8,6 +8,7 @@
+ #include <linux/thread_info.h>
+ #include <linux/prefetch.h>
+ #include <linux/string.h>
++#include <linux/sched.h>
+ #include <asm/asm.h>
+ #include <asm/page.h>
+
+@@ -29,7 +30,12 @@
+
+ #define get_ds() (KERNEL_DS)
+ #define get_fs() (current_thread_info()->addr_limit)
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++void __set_fs(mm_segment_t x);
++void set_fs(mm_segment_t x);
++#else
+ #define set_fs(x) (current_thread_info()->addr_limit = (x))
++#endif
+
+ #define segment_eq(a, b) ((a).seg == (b).seg)
+
+@@ -77,7 +83,33 @@
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
++#define access_ok(type, addr, size) \
++({ \
++ long __size = size; \
++ unsigned long __addr = (unsigned long)addr; \
++ unsigned long __addr_ao = __addr & PAGE_MASK; \
++ unsigned long __end_ao = __addr + __size - 1; \
++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
++ while(__addr_ao <= __end_ao) { \
++ char __c_ao; \
++ __addr_ao += PAGE_SIZE; \
++ if (__size > PAGE_SIZE) \
++ cond_resched(); \
++ if (__get_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ if (type != VERIFY_WRITE) { \
++ __addr = __addr_ao; \
++ continue; \
++ } \
++ if (__put_user(__c_ao, (char __user *)__addr)) \
++ break; \
++ __addr = __addr_ao; \
++ } \
++ } \
++ __ret_ao; \
++})
+
+ /*
+ * The exception table consists of pairs of addresses: the first is the
+@@ -183,12 +215,20 @@ extern int __get_user_bad(void);
+ asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+-
++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __copyuser_seg "gs;"
++#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
++#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
++#else
++#define __copyuser_seg
++#define __COPYUSER_SET_ES
++#define __COPYUSER_RESTORE_ES
++#endif
+
+ #ifdef CONFIG_X86_32
+ #define __put_user_asm_u64(x, addr, err, errret) \
+- asm volatile("1: movl %%eax,0(%2)\n" \
+- "2: movl %%edx,4(%2)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "4: movl %3,%0\n" \
+@@ -200,8 +240,8 @@ extern int __get_user_bad(void);
+ : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex_u64(x, addr) \
+- asm volatile("1: movl %%eax,0(%1)\n" \
+- "2: movl %%edx,4(%1)\n" \
++ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
++ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
+ "3:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ _ASM_EXTABLE(2b, 3b - 2b) \
+@@ -253,7 +293,7 @@ extern void __put_user_8(void);
+ __typeof__(*(ptr)) __pu_val; \
+ __chk_user_ptr(ptr); \
+ might_fault(); \
+- __pu_val = x; \
++ __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_user_x(1, __pu_val, ptr, __ret_pu); \
+@@ -374,7 +414,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -382,7 +422,7 @@ do { \
+ " jmp 2b\n" \
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+- : "=r" (err), ltype(x) \
++ : "=r" (err), ltype (x) \
+ : "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __get_user_size_ex(x, ptr, size) \
+@@ -407,7 +447,7 @@ do { \
+ } while (0)
+
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : ltype(x) : "m" (__m(addr)))
+@@ -424,13 +464,24 @@ do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+- (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+
+ /* FIXME: this hack is definitely wrong -AK */
+ struct __large_struct { unsigned long buf[100]; };
+-#define __m(x) (*(struct __large_struct __user *)(x))
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define ____m(x) \
++({ \
++ unsigned long ____x = (unsigned long)(x); \
++ if (____x < PAX_USER_SHADOW_BASE) \
++ ____x += PAX_USER_SHADOW_BASE; \
++ (void __user *)____x; \
++})
++#else
++#define ____m(x) (x)
++#endif
++#define __m(x) (*(struct __large_struct __user *)____m(x))
+
+ /*
+ * Tell gcc we read from memory instead of writing: this is because
+@@ -438,7 +489,7 @@ struct __large_struct { unsigned long buf[100]; };
+ * aliasing issues.
+ */
+ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3: mov %3,%0\n" \
+@@ -446,10 +497,10 @@ struct __large_struct { unsigned long buf[100]; };
+ ".previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r"(err) \
+- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+ #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
+- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
++ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
+ "2:\n" \
+ _ASM_EXTABLE(1b, 2b - 1b) \
+ : : ltype(x), "m" (__m(addr)))
+@@ -488,8 +539,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * On error, the variable @x is set to zero.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __get_user(x, ptr) get_user((x), (ptr))
++#else
+ #define __get_user(x, ptr) \
+ __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++#endif
+
+ /**
+ * __put_user: - Write a simple value into user space, with less checking.
+@@ -511,8 +566,12 @@ struct __large_struct { unsigned long buf[100]; };
+ * Returns zero on success, or -EFAULT on error.
+ */
+
++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
++#define __put_user(x, ptr) put_user((x), (ptr))
++#else
+ #define __put_user(x, ptr) \
+ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++#endif
+
+ #define __get_user_unaligned __get_user
+ #define __put_user_unaligned __put_user
+@@ -530,7 +589,7 @@ struct __large_struct { unsigned long buf[100]; };
+ #define get_user_ex(x, ptr) do { \
+ unsigned long __gue_val; \
+ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
+- (x) = (__force __typeof__(*(ptr)))__gue_val; \
++ (x) = (__typeof__(*(ptr)))__gue_val; \
+ } while (0)
+
+ #ifdef CONFIG_X86_WP_WORKS_OK
+@@ -567,6 +626,7 @@ extern struct movsl_mask {
+
+ #define ARCH_HAS_NOCACHE_UACCESS 1
+
++#define ARCH_HAS_SORT_EXTABLE
+ #ifdef CONFIG_X86_32
+ # include "uaccess_32.h"
+ #else
+diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
+index 632fb44..b284405 100644
+--- a/arch/x86/include/asm/uaccess_32.h
++++ b/arch/x86/include/asm/uaccess_32.h
+@@ -12,15 +12,15 @@
+ #include <asm/page.h>
+
+ unsigned long __must_check __copy_to_user_ll
+- (void __user *to, const void *from, unsigned long n);
++ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nozero
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+- (void *to, const void __user *from, unsigned long n);
++ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
+
+ /**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+@@ -44,6 +44,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
+ static __always_inline unsigned long __must_check
+ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+ {
++ pax_track_stack();
++
++ if ((long)n < 0)
++ return n;
++
++ check_object_size(from, n, true);
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
+ might_fault();
++
+ return __copy_to_user_inatomic(to, from, n);
+ }
+
+ static __always_inline unsigned long
+ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+ {
++ if ((long)n < 0)
++ return n;
++
+ /* Avoid zeroing the tail if the copy fails..
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
+ * but as the zeroing behaviour is only significant when n is not
+@@ -138,6 +149,14 @@ static __always_inline unsigned long
+ __copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ pax_track_stack();
++
++ if ((long)n < 0)
++ return n;
++
++ check_object_size(to, n, false);
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
+ const void __user *from, unsigned long n)
+ {
+ might_fault();
++
++ if ((long)n < 0)
++ return n;
++
+ if (__builtin_constant_p(n)) {
+ unsigned long ret;
+
+@@ -182,14 +205,72 @@ static __always_inline unsigned long
+ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
+ unsigned long n)
+ {
+- return __copy_from_user_ll_nocache_nozero(to, from, n);
++ if ((long)n < 0)
++ return n;
++
++ return __copy_from_user_ll_nocache_nozero(to, from, n);
++}
++
++extern void copy_to_user_overflow(void) __compiletime_warning("copy_to_user() buffer size is not provably correct");
++extern void copy_from_user_overflow(void) __compiletime_warning("copy_from_user() buffer size is not provably correct");
++
++/**
++ * copy_to_user: - Copy a block of data into user space.
++ * @to: Destination address, in user space.
++ * @from: Source address, in kernel space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from kernel space to user space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ */
++static __always_inline unsigned long __must_check
++copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ size_t sz = __compiletime_object_size(from);
++
++ if (unlikely(sz != (size_t)-1 && sz < n))
++ copy_to_user_overflow();
++ else if (access_ok(VERIFY_WRITE, to, n))
++ n = __copy_to_user(to, from, n);
++ return n;
++}
++
++/**
++ * copy_from_user: - Copy a block of data from user space.
++ * @to: Destination address, in kernel space.
++ * @from: Source address, in user space.
++ * @n: Number of bytes to copy.
++ *
++ * Context: User context only. This function may sleep.
++ *
++ * Copy data from user space to kernel space.
++ *
++ * Returns number of bytes that could not be copied.
++ * On success, this will be zero.
++ *
++ * If some data could not be copied, this function will pad the copied
++ * data to the requested size using zero bytes.
++ */
++static __always_inline unsigned long __must_check
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++ size_t sz = __compiletime_object_size(to);
++
++ check_object_size(to, n, false);
++
++ if (unlikely(sz != (size_t)-1 && sz < n))
++ copy_from_user_overflow();
++ else if (access_ok(VERIFY_READ, from, n))
++ n = __copy_from_user(to, from, n);
++ else if ((long)n > 0)
++ memset(to, 0, n);
++ return n;
+ }
+
+-unsigned long __must_check copy_to_user(void __user *to,
+- const void *from, unsigned long n);
+-unsigned long __must_check copy_from_user(void *to,
+- const void __user *from,
+- unsigned long n);
+ long __must_check strncpy_from_user(char *dst, const char __user *src,
+ long count);
+ long __must_check __strncpy_from_user(char *dst,
+@@ -212,7 +293,7 @@ long __must_check __strncpy_from_user(char *dst,
+ #define strlen_user(str) strnlen_user(str, LONG_MAX)
+
+ long strnlen_user(const char __user *str, long n);
+-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
+-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
++unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
++unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+
+ #endif /* _ASM_X86_UACCESS_32_H */
+diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
+index db24b21..9fa9f18 100644
+--- a/arch/x86/include/asm/uaccess_64.h
++++ b/arch/x86/include/asm/uaccess_64.h
+@@ -9,6 +9,9 @@
+ #include <linux/prefetch.h>
+ #include <linux/lockdep.h>
+ #include <asm/page.h>
++#include <asm/pgtable.h>
++
++#define set_fs(x) (current_thread_info()->addr_limit = (x))
+
+ /*
+ * Copy To/From Userspace
+@@ -16,116 +19,187 @@
+
+ /* Handles exceptions in both to and from, but doesn't do access_ok */
+ __must_check unsigned long
+-copy_user_generic(void *to, const void *from, unsigned len);
++copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
+
+ __must_check unsigned long
+-copy_to_user(void __user *to, const void *from, unsigned len);
+-__must_check unsigned long
+-copy_from_user(void *to, const void __user *from, unsigned len);
+-__must_check unsigned long
+-copy_in_user(void __user *to, const void __user *from, unsigned len);
++copy_in_user(void __user *to, const void __user *from, unsigned long len);
++
++extern void copy_to_user_overflow(void) __compiletime_warning("copy_to_user() buffer size is not provably correct");
++extern void copy_from_user_overflow(void) __compiletime_warning("copy_from_user() buffer size is not provably correct");
+
+ static __always_inline __must_check
+-int __copy_from_user(void *dst, const void __user *src, unsigned size)
++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
+ {
+- int ret = 0;
++ size_t sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
+- if (!__builtin_constant_p(size))
+- return copy_user_generic(dst, (__force void *)src, size);
++
++ if (size > INT_MAX)
++ return size;
++
++ check_object_size(dst, size, false);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
++ if (unlikely(sz != (size_t)-1 && sz < size)) {
++ copy_from_user_overflow();
++ return size;
++ }
++
++ if (!__builtin_constant_p(size)) {
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
++ }
+ switch (size) {
+- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
++ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ return ret;
+- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
++ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ return ret;
+- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
++ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ return ret;
+- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ return ret;
+ case 10:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 10);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+- (u16 __user *)(8 + (char __user *)src),
++ (const u16 __user *)(8 + (const char __user *)src),
+ ret, "w", "w", "=r", 2);
+ return ret;
+ case 16:
+- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
++ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
+ ret, "q", "", "=r", 16);
+ if (unlikely(ret))
+ return ret;
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+- (u64 __user *)(8 + (char __user *)src),
++ (const u64 __user *)(8 + (const char __user *)src),
+ ret, "q", "", "=r", 8);
+ return ret;
+ default:
+- return copy_user_generic(dst, (__force void *)src, size);
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_to_user(void __user *dst, const void *src, unsigned size)
++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
+ {
+- int ret = 0;
++ size_t sz = __compiletime_object_size(dst);
++ unsigned ret = 0;
+
+ might_fault();
++
++ pax_track_stack();
++
++ if (size > INT_MAX)
++ return size;
++
++ check_object_size(src, size, true);
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
++ if (unlikely(sz != (size_t)-1 && sz < size)) {
++ copy_to_user_overflow();
++ return size;
++ }
++
+ if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst, src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ switch (size) {
+- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
++ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
+ ret, "b", "b", "iq", 1);
+ return ret;
+- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
++ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
++ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
+ ret, "l", "k", "ir", 4);
+ return ret;
+- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ case 10:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 10);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
++ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ return ret;
+ case 16:
+- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
++ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
+ ret, "q", "", "er", 16);
+ if (unlikely(ret))
+ return ret;
+ asm("":::"memory");
+- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
++ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ return ret;
+ default:
+- return copy_user_generic((__force void *)dst, src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ }
+ }
+
+ static __always_inline __must_check
+-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
++unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
+ {
+- int ret = 0;
++ if (access_ok(VERIFY_WRITE, to, len))
++ len = __copy_to_user(to, from, len);
++ return len;
++}
+
++static __always_inline __must_check
++unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
++{
+ might_fault();
++
++ check_object_size(to, len, false);
++
++ if (access_ok(VERIFY_READ, from, len))
++ len = __copy_from_user(to, from, len);
++ else if (len < INT_MAX)
++ memset(to, 0, len);
++ return len;
++}
++
++static __always_inline __must_check
++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
++{
++ unsigned ret = 0;
++
++ might_fault();
++
++ pax_track_stack();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++ if (!__access_ok(VERIFY_WRITE, dst, size))
++ return size;
++#endif
++
+ if (!__builtin_constant_p(size))
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst),
++ (__force_kernel const void *)____m(src), size);
+ switch (size) {
+ case 1: {
+ u8 tmp;
+- __get_user_asm(tmp, (u8 __user *)src,
++ __get_user_asm(tmp, (const u8 __user *)src,
+ ret, "b", "b", "=q", 1);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u8 __user *)dst,
+@@ -134,7 +208,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 2: {
+ u16 tmp;
+- __get_user_asm(tmp, (u16 __user *)src,
++ __get_user_asm(tmp, (const u16 __user *)src,
+ ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u16 __user *)dst,
+@@ -144,7 +218,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+
+ case 4: {
+ u32 tmp;
+- __get_user_asm(tmp, (u32 __user *)src,
++ __get_user_asm(tmp, (const u32 __user *)src,
+ ret, "l", "k", "=r", 4);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u32 __user *)dst,
+@@ -153,7 +227,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ }
+ case 8: {
+ u64 tmp;
+- __get_user_asm(tmp, (u64 __user *)src,
++ __get_user_asm(tmp, (const u64 __user *)src,
+ ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __put_user_asm(tmp, (u64 __user *)dst,
+@@ -161,8 +235,8 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
+ return ret;
+ }
+ default:
+- return copy_user_generic((__force void *)dst,
+- (__force void *)src, size);
++ return copy_user_generic((__force_kernel void *)____m(dst),
++ (__force_kernel const void *)____m(src), size);
+ }
+ }
+
+@@ -173,36 +247,62 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
+ __must_check long strnlen_user(const char __user *str, long n);
+ __must_check long __strnlen_user(const char __user *str, long n);
+ __must_check long strlen_user(const char __user *str);
+-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
+-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
++__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
++__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
+
+-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
+- unsigned size);
++static __must_check __always_inline unsigned long
++__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
++{
++ pax_track_stack();
++
++ if (size > INT_MAX)
++ return size;
+
+-static __must_check __always_inline int
+-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
++ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
++}
++
++static __must_check __always_inline unsigned long
++__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
+ {
+- return copy_user_generic((__force void *)dst, src, size);
++ if (size > INT_MAX)
++ return size;
++
++ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
+ }
+
+-extern long __copy_user_nocache(void *dst, const void __user *src,
+- unsigned size, int zerorest);
++extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
++ unsigned long size, int zerorest) __size_overflow(3);
+
+-static inline int
+-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
+ {
+ might_sleep();
++
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 1);
+ }
+
+-static inline int
+-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+- unsigned size)
++static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
++ unsigned long size)
+ {
++ if (size > INT_MAX)
++ return size;
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ if (!__access_ok(VERIFY_READ, src, size))
++ return size;
++#endif
++
+ return __copy_user_nocache(dst, src, size, 0);
+ }
+
+-unsigned long
+-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
++extern unsigned long
++copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
+
+ #endif /* _ASM_X86_UACCESS_64_H */
+diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
+index 9064052..786cfbc 100644
+--- a/arch/x86/include/asm/vdso.h
++++ b/arch/x86/include/asm/vdso.h
+@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
+ #define VDSO32_SYMBOL(base, name) \
+ ({ \
+ extern const char VDSO32_##name[]; \
+- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
++ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
+ })
+ #endif
+
+diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
+index 3d61e20..9507180 100644
+--- a/arch/x86/include/asm/vgtod.h
++++ b/arch/x86/include/asm/vgtod.h
+@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
+ int sysctl_enabled;
+ struct timezone sys_tz;
+ struct { /* extract of a clocksource struct */
++ char name[8];
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
+index 61e08c0..b0da582 100644
+--- a/arch/x86/include/asm/vmi.h
++++ b/arch/x86/include/asm/vmi.h
+@@ -191,6 +191,7 @@ struct vrom_header {
+ u8 reserved[96]; /* Reserved for headers */
+ char vmi_init[8]; /* VMI_Init jump point */
+ char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
++ char rom_data[8048]; /* rest of the option ROM */
+ } __attribute__((packed));
+
+ struct pnp_header {
+diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
+index c6e0bee..fcb9f74 100644
+--- a/arch/x86/include/asm/vmi_time.h
++++ b/arch/x86/include/asm/vmi_time.h
+@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
+ int (*wallclock_updated)(void);
+ void (*set_alarm)(u32 flags, u64 expiry, u64 period);
+ void (*cancel_alarm)(u32 flags);
+-} vmi_timer_ops;
++} __no_const vmi_timer_ops;
+
+ /* Prototypes */
+ extern void __init vmi_time_init(void);
+diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
+index d0983d2..1f7c9e9 100644
+--- a/arch/x86/include/asm/vsyscall.h
++++ b/arch/x86/include/asm/vsyscall.h
+@@ -15,9 +15,10 @@ enum vsyscall_num {
+
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
++#include <linux/getcpu.h>
++#include <linux/time.h>
+
+ #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
+-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
+
+ /* Definitions for CONFIG_GENERIC_TIME definitions */
+ #define __section_vsyscall_gtod_data __attribute__ \
+@@ -31,7 +32,6 @@ enum vsyscall_num {
+ #define VGETCPU_LSL 2
+
+ extern int __vgetcpu_mode;
+-extern volatile unsigned long __jiffies;
+
+ /* kernel space (writeable) */
+ extern int vgetcpu_mode;
+@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
+
+ extern void map_vsyscall(void);
+
++extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
++extern time_t vtime(time_t *t);
++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_X86_VSYSCALL_H */
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 2c756fd..062473d 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -88,7 +88,7 @@ struct x86_init_timers {
+ void (*setup_percpu_clockev)(void);
+ void (*tsc_pre_init)(void);
+ void (*timer_init)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_init_ops - functions for platform specific setup
+@@ -101,7 +101,7 @@ struct x86_init_ops {
+ struct x86_init_oem oem;
+ struct x86_init_paging paging;
+ struct x86_init_timers timers;
+-};
++} __no_const;
+
+ /**
+ * struct x86_cpuinit_ops - platform specific cpu hotplug setups
+@@ -109,7 +109,7 @@ struct x86_init_ops {
+ */
+ struct x86_cpuinit_ops {
+ void (*setup_percpu_clockev)(void);
+-};
++} __no_const;
+
+ /**
+ * struct x86_platform_ops - platform specific runtime functions
+@@ -121,7 +121,7 @@ struct x86_platform_ops {
+ unsigned long (*calibrate_tsc)(void);
+ unsigned long (*get_wallclock)(void);
+ int (*set_wallclock)(unsigned long nowtime);
+-};
++} __no_const;
+
+ extern struct x86_init_ops x86_init;
+ extern struct x86_cpuinit_ops x86_cpuinit;
+diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
+index 727acc1..52c9e4c 100644
+--- a/arch/x86/include/asm/xsave.h
++++ b/arch/x86/include/asm/xsave.h
+@@ -56,7 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
+ static inline int xsave_user(struct xsave_struct __user *buf)
+ {
+ int err;
+- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
++
++ buf = (struct xsave_struct __user *)____m(buf);
++
++ __asm__ __volatile__("1:"
++ __copyuser_seg
++ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+@@ -78,11 +83,13 @@ static inline int xsave_user(struct xsave_struct __user *buf)
+ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+ {
+ int err;
+- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
++ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)____m(buf));
+ u32 lmask = mask;
+ u32 hmask = mask >> 32;
+
+- __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
++ __asm__ __volatile__("1:"
++ __copyuser_seg
++ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl $-1,%[err]\n"
+diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
+index 6a564ac..3f3a3d7 100644
+--- a/arch/x86/kernel/acpi/realmode/Makefile
++++ b/arch/x86/kernel/acpi/realmode/Makefile
+@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+ KBUILD_CFLAGS += $(call cc-option, -m32)
++ifdef CONSTIFY_PLUGIN
++KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
++endif
+ KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+ GCOV_PROFILE := n
+
+diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
+index 580b4e2..d4129e4 100644
+--- a/arch/x86/kernel/acpi/realmode/wakeup.S
++++ b/arch/x86/kernel/acpi/realmode/wakeup.S
+@@ -91,6 +91,9 @@ _start:
+ /* Do any other stuff... */
+
+ #ifndef CONFIG_64BIT
++ /* Recheck NX bit overrides (64bit path does this in trampoline) */
++ call verify_cpu
++
+ /* This could also be done in C code... */
+ movl pmode_cr3, %eax
+ movl %eax, %cr3
+@@ -104,7 +107,7 @@ _start:
+ movl %eax, %ecx
+ orl %edx, %ecx
+ jz 1f
+- movl $0xc0000080, %ecx
++ mov $MSR_EFER, %ecx
+ wrmsr
+ 1:
+
+@@ -114,6 +117,7 @@ _start:
+ movl pmode_cr0, %eax
+ movl %eax, %cr0
+ jmp pmode_return
++# include "../../verify_cpu.S"
+ #else
+ pushw $0
+ pushw trampoline_segment
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index ca93638..7042f24 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -11,11 +11,12 @@
+ #include <linux/cpumask.h>
+ #include <asm/segment.h>
+ #include <asm/desc.h>
++#include <asm/e820.h>
+
+ #include "realmode/wakeup.h"
+ #include "sleep.h"
+
+-unsigned long acpi_wakeup_address;
++unsigned long acpi_wakeup_address = 0x2000;
+ unsigned long acpi_realmode_flags;
+
+ /* address in low memory of the wakeup routine. */
+@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
+ #else /* CONFIG_64BIT */
+ header->trampoline_segment = setup_trampoline() >> 4;
+ #ifdef CONFIG_SMP
+- stack_start.sp = temp_stack + sizeof(temp_stack);
++ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
++
++ pax_open_kernel();
+ early_gdt_descr.address =
+ (unsigned long)get_cpu_gdt_table(smp_processor_id());
++ pax_close_kernel();
++
+ initial_gs = per_cpu_offset(smp_processor_id());
+ #endif
+ initial_code = (unsigned long)wakeup_long64;
+@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
+ return;
+ }
+
+- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
+-
+- if (!acpi_realmode) {
+- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
+- return;
+- }
+-
+- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
++ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
++	acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
+ }
+
+
+diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
+index 8ded418..079961e 100644
+--- a/arch/x86/kernel/acpi/wakeup_32.S
++++ b/arch/x86/kernel/acpi/wakeup_32.S
+@@ -30,13 +30,11 @@ wakeup_pmode_return:
+ # and restore the stack ... but you need gdt for this to work
+ movl saved_context_esp, %esp
+
+- movl %cs:saved_magic, %eax
+- cmpl $0x12345678, %eax
++ cmpl $0x12345678, saved_magic
+ jne bogus_magic
+
+ # jump to place where we left off
+- movl saved_eip, %eax
+- jmp *%eax
++ jmp *(saved_eip)
+
+ bogus_magic:
+ jmp bogus_magic
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index de7353c..075da5f 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+
+ BUG_ON(p->len > MAX_PATCH_LEN);
+ /* prep the buffer with the original instructions */
+- memcpy(insnbuf, p->instr, p->len);
++ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
+ used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+ (unsigned long)p->instr, p->len);
+
+@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
+ if (smp_alt_once)
+ free_init_pages("SMP alternatives",
+ (unsigned long)__smp_locks,
+- (unsigned long)__smp_locks_end);
++ PAGE_ALIGN((unsigned long)__smp_locks_end));
+
+ restart_nmi();
+ }
+@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
+ * instructions. And on the local CPU you need to be protected again NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
++static void *__kprobes text_poke_early(void *addr, const void *opcode,
+ size_t len)
+ {
+ unsigned long flags;
+ local_irq_save(flags);
+- memcpy(addr, opcode, len);
++
++ pax_open_kernel();
++ memcpy(ktla_ktva(addr), opcode, len);
+ sync_core();
++ pax_close_kernel();
++
+ local_irq_restore(flags);
+ /* Could also do a CLFLUSH here to speed up CPU recovery; but
+ that causes hangs on some VIA CPUs. */
+@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
+ */
+ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
+ {
+- unsigned long flags;
+- char *vaddr;
++ unsigned char *vaddr = ktla_ktva(addr);
+ struct page *pages[2];
+- int i;
++ size_t i;
+
+ if (!core_kernel_text((unsigned long)addr)) {
+- pages[0] = vmalloc_to_page(addr);
+- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
++ pages[0] = vmalloc_to_page(vaddr);
++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
+ } else {
+- pages[0] = virt_to_page(addr);
++ pages[0] = virt_to_page(vaddr);
+ WARN_ON(!PageReserved(pages[0]));
+- pages[1] = virt_to_page(addr + PAGE_SIZE);
++ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
+ }
+ BUG_ON(!pages[0]);
+- local_irq_save(flags);
+- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
+- if (pages[1])
+- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
+- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
+- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
+- clear_fixmap(FIX_TEXT_POKE0);
+- if (pages[1])
+- clear_fixmap(FIX_TEXT_POKE1);
+- local_flush_tlb();
+- sync_core();
+- /* Could also do a CLFLUSH here to speed up CPU recovery; but
+- that causes hangs on some VIA CPUs. */
++ text_poke_early(addr, opcode, len);
+ for (i = 0; i < len; i++)
+- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
+- local_irq_restore(flags);
++ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
+ return addr;
+ }
+diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
+index 3a44b75..1601800 100644
+--- a/arch/x86/kernel/amd_iommu.c
++++ b/arch/x86/kernel/amd_iommu.c
+@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
+ }
+ }
+
+-static struct dma_map_ops amd_iommu_dma_ops = {
++static const struct dma_map_ops amd_iommu_dma_ops = {
+ .alloc_coherent = alloc_coherent,
+ .free_coherent = free_coherent,
+ .map_page = map_page,
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 1d2d670..8e3f477 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
+ /*
+ * Debug level, exported for io_apic.c
+ */
+-unsigned int apic_verbosity;
++int apic_verbosity;
+
+ int pic_mode;
+
+@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
+ apic_write(APIC_ESR, 0);
+ v1 = apic_read(APIC_ESR);
+ ack_APIC_irq();
+- atomic_inc(&irq_err_count);
++ atomic_inc_unchecked(&irq_err_count);
+
+ /*
+ * Here is what the APIC error bits mean:
+@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
+ u16 *bios_cpu_apicid;
+ DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+
++ pax_track_stack();
++
+ bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+ bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index d256bc3..627a02d 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
+ ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
+ GFP_ATOMIC);
+ if (!ioapic_entries)
+- return 0;
++ return NULL;
+
+ for (apic = 0; apic < nr_ioapics; apic++) {
+ ioapic_entries[apic] =
+@@ -733,7 +733,7 @@ nomem:
+ kfree(ioapic_entries[apic]);
+ kfree(ioapic_entries);
+
+- return 0;
++ return NULL;
+ }
+
+ /*
+@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
+ }
+ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+-void lock_vector_lock(void)
++void lock_vector_lock(void) __acquires(vector_lock)
+ {
+ /* Used to the online set of cpus does not change
+ * during assign_irq_vector.
+@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
+ spin_lock(&vector_lock);
+ }
+
+-void unlock_vector_lock(void)
++void unlock_vector_lock(void) __releases(vector_lock)
+ {
+ spin_unlock(&vector_lock);
+ }
+@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
+ ack_APIC_irq();
+ }
+
+-atomic_t irq_mis_count;
++atomic_unchecked_t irq_mis_count;
+
+ static void ack_apic_level(unsigned int irq)
+ {
+@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
+
+ /* Tail end of version 0x11 I/O APIC bug workaround */
+ if (!(v & (1 << (i & 0x1f)))) {
+- atomic_inc(&irq_mis_count);
++ atomic_inc_unchecked(&irq_mis_count);
+ spin_lock(&ioapic_lock);
+ __mask_and_edge_IO_APIC_irq(cfg);
+ __unmask_and_level_IO_APIC_irq(cfg);
+diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
+index 151ace6..f317474 100644
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
+ * This is for buggy BIOS's that refer to (real mode) segment 0x40
+ * even though they are called in protected mode.
+ */
+-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
+ (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
+
+ static const char driver_version[] = "1.16ac"; /* no spaces */
+@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
+ &call->esi);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+
+ return call->eax & 0xff;
+@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
+ BUG_ON(cpu != 0);
+ gdt = get_cpu_gdt_table(cpu);
+ save_desc_40 = gdt[0x40 / 8];
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = bad_bios_desc;
++ pax_close_kernel();
+
+ apm_irq_save(flags);
+ APM_DO_SAVE_SEGS;
+@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
+ &call->eax);
+ APM_DO_RESTORE_SEGS;
+ apm_irq_restore(flags);
++
++ pax_open_kernel();
+ gdt[0x40 / 8] = save_desc_40;
++ pax_close_kernel();
++
+ put_cpu();
+ return error;
+ }
+@@ -975,7 +989,7 @@ recalc:
+
+ static void apm_power_off(void)
+ {
+- unsigned char po_bios_call[] = {
++ const unsigned char po_bios_call[] = {
+ 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
+ 0x8e, 0xd0, /* movw ax,ss */
+ 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
+@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
+ * code to that CPU.
+ */
+ gdt = get_cpu_gdt_table(0);
++
++ pax_open_kernel();
+ set_desc_base(&gdt[APM_CS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
+ set_desc_base(&gdt[APM_CS_16 >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_desc_base(&gdt[APM_DS >> 3],
+ (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
++ pax_close_kernel();
+
+ proc_create("apm", 0, NULL, &apm_file_ops);
+
+diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
+index dfdbf64..9b2b6ce 100644
+--- a/arch/x86/kernel/asm-offsets_32.c
++++ b/arch/x86/kernel/asm-offsets_32.c
+@@ -51,7 +51,6 @@ void foo(void)
+ OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
+ BLANK();
+
+- OFFSET(TI_task, thread_info, task);
+ OFFSET(TI_exec_domain, thread_info, exec_domain);
+ OFFSET(TI_flags, thread_info, flags);
+ OFFSET(TI_status, thread_info, status);
+@@ -60,6 +59,8 @@ void foo(void)
+ OFFSET(TI_restart_block, thread_info, restart_block);
+ OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+ OFFSET(TI_cpu, thread_info, cpu);
++ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+ BLANK();
+
+ OFFSET(GDS_size, desc_ptr, size);
+@@ -99,6 +100,7 @@ void foo(void)
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
+ DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
+ DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
+@@ -115,6 +117,11 @@ void foo(void)
+ OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
+ #endif
+
+ #ifdef CONFIG_XEN
+diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
+index 4a6aeed..371de20 100644
+--- a/arch/x86/kernel/asm-offsets_64.c
++++ b/arch/x86/kernel/asm-offsets_64.c
+@@ -44,6 +44,8 @@ int main(void)
+ ENTRY(addr_limit);
+ ENTRY(preempt_count);
+ ENTRY(status);
++ ENTRY(lowest_stack);
++ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
+ #ifdef CONFIG_IA32_EMULATION
+ ENTRY(sysenter_return);
+ #endif
+@@ -63,6 +65,18 @@ int main(void)
+ OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+ OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
+ OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
++#endif
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
++ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
++#endif
++
+ #endif
+
+
+@@ -115,6 +129,7 @@ int main(void)
+ ENTRY(cr8);
+ BLANK();
+ #undef ENTRY
++ DEFINE(TSS_size, sizeof(struct tss_struct));
+ DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ BLANK();
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+@@ -130,6 +145,7 @@ int main(void)
+
+ BLANK();
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
++ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
+ #ifdef CONFIG_XEN
+ BLANK();
+ OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
+diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
+index 1f537a2..a7cb6c6 100644
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_common.o = -pg
+ endif
+
+-# Make sure load_percpu_segment has no stackprotector
+-nostackp := $(call cc-option, -fno-stack-protector)
+-CFLAGS_common.o := $(nostackp)
+-
+ obj-y := intel_cacheinfo.o addon_cpuid_features.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 6e082dc..a0b5f36 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
+ unsigned int size)
+ {
+ /* AMD errata T13 (order #21922) */
+- if ((c->x86 == 6)) {
++ if (c->x86 == 6) {
+ /* Duron Rev A0 */
+ if (c->x86_model == 3 && c->x86_mask == 0)
+ size = 64;
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index ba1a1dd..1a4b0af 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
+
+ static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
+-#ifdef CONFIG_X86_64
+- /*
+- * We need valid kernel segments for data and code in long mode too
+- * IRET will check the segment types kkeil 2000/10/28
+- * Also sysret mandates a special GDT layout
+- *
+- * TLS descriptors are currently at a different place compared to i386.
+- * Hopefully nobody expects them at a fixed place (Wine?)
+- */
+- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+-#else
+- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
+- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
+- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+- /*
+- * Segments used for calling PnP BIOS have byte granularity.
+- * They code segments and data segments have fixed 64k limits,
+- * the transfer segment sizes are set at run time.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /* 16-bit data */
+- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
+- /*
+- * The APM segments have byte granularity and their bases
+- * are set at run time. All have 64k limits.
+- */
+- /* 32-bit code */
+- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
+- /* 16-bit code */
+- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
+- /* data */
+- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
+-
+- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+- GDT_STACK_CANARY_INIT
+-#endif
+-} };
+-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
+-
+ static int __init x86_xsave_setup(char *s)
+ {
+ setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+@@ -345,7 +291,7 @@ void switch_to_new_gdt(int cpu)
+ {
+ struct desc_ptr gdt_descr;
+
+- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ /* Reload the per-cpu base */
+@@ -799,6 +745,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+ /* Filter out anything that depends on CPUID levels we don't have */
+ filter_cpuid_features(c, true);
+
++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
++ setup_clear_cpu_cap(X86_FEATURE_SEP);
++#endif
++
+ /* If the model name is still unset, do table lookup. */
+ if (!c->x86_model_id[0]) {
+ const char *p;
+@@ -982,6 +932,9 @@ static __init int setup_disablecpuid(char *arg)
+ }
+ __setup("clearcpuid=", setup_disablecpuid);
+
++DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
++EXPORT_PER_CPU_SYMBOL(current_tinfo);
++
+ #ifdef CONFIG_X86_64
+ struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+
+@@ -997,7 +950,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
+ EXPORT_PER_CPU_SYMBOL(current_task);
+
+ DEFINE_PER_CPU(unsigned long, kernel_stack) =
+- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
++ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
+ EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+ DEFINE_PER_CPU(char *, irq_stack_ptr) =
+@@ -1062,7 +1015,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
+ {
+ memset(regs, 0, sizeof(struct pt_regs));
+ regs->fs = __KERNEL_PERCPU;
+- regs->gs = __KERNEL_STACK_CANARY;
++ savesegment(gs, regs->gs);
+
+ return regs;
+ }
+@@ -1103,7 +1056,7 @@ void __cpuinit cpu_init(void)
+ int i;
+
+ cpu = stack_smp_processor_id();
+- t = &per_cpu(init_tss, cpu);
++ t = init_tss + cpu;
+ orig_ist = &per_cpu(orig_ist, cpu);
+
+ #ifdef CONFIG_NUMA
+@@ -1129,7 +1082,7 @@ void __cpuinit cpu_init(void)
+ switch_to_new_gdt(cpu);
+ loadsegment(fs, 0);
+
+- load_idt((const struct desc_ptr *)&idt_descr);
++ load_idt(&idt_descr);
+
+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
+ syscall_init();
+@@ -1138,7 +1091,6 @@ void __cpuinit cpu_init(void)
+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
+ barrier();
+
+- check_efer();
+ if (cpu != 0)
+ enable_x2apic();
+
+@@ -1201,7 +1153,7 @@ void __cpuinit cpu_init(void)
+ {
+ int cpu = smp_processor_id();
+ struct task_struct *curr = current;
+- struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct tss_struct *t = init_tss + cpu;
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
+diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
+index 6a77cca..4f4fca0 100644
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
+ load_idt(&idt_descr);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index 417990f..8c489b8 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ return ret;
+ }
+
+-static struct sysfs_ops sysfs_ops = {
++static const struct sysfs_ops sysfs_ops = {
+ .show = show,
+ .store = store,
+ };
+@@ -931,6 +931,11 @@ static struct kobj_type ktype_cache = {
+ .default_attrs = default_attrs,
+ };
+
++static struct kobj_type ktype_l3_cache = {
++ .sysfs_ops = &sysfs_ops,
++ .default_attrs = default_l3_attrs,
++};
++
+ static struct kobj_type ktype_percpu_entry = {
+ .sysfs_ops = &sysfs_ops,
+ };
+@@ -997,6 +1002,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ }
+
+ for (i = 0; i < num_cache_leaves; i++) {
++ struct kobj_type *ktype;
++
+ this_object = INDEX_KOBJECT_PTR(cpu, i);
+ this_object->cpu = cpu;
+ this_object->index = i;
+@@ -1004,12 +1011,12 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
+ this_leaf = CPUID4_INFO_IDX(cpu, i);
+
+ if (this_leaf->can_disable)
+- ktype_cache.default_attrs = default_l3_attrs;
++ ktype = &ktype_l3_cache;
+ else
+- ktype_cache.default_attrs = default_attrs;
++ ktype = &ktype_cache;
+
+ retval = kobject_init_and_add(&(this_object->kobj),
+- &ktype_cache,
++ ktype,
+ per_cpu(cache_kobject, cpu),
+ "index%1lu", i);
+ if (unlikely(retval)) {
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 472763d..9831e11 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
+ static int inject_init(void)
+ {
+ printk(KERN_INFO "Machine check injector initialized\n");
+- mce_chrdev_ops.write = mce_write;
++ pax_open_kernel();
++ *(void **)&mce_chrdev_ops.write = mce_write;
++ pax_close_kernel();
+ register_die_notifier(&mce_raise_nb);
+ return 0;
+ }
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 28a7e4c8..3c94ef2 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -43,6 +43,7 @@
+ #include <asm/ipi.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/local.h>
+
+ #include "mce-internal.h"
+
+@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
+ !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
+ m->cs, m->ip);
+
+- if (m->cs == __KERNEL_CS)
++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
+ print_symbol("{%s}", m->ip);
+ pr_cont("\n");
+ }
+@@ -221,10 +222,10 @@ static void print_mce_tail(void)
+
+ #define PANIC_TIMEOUT 5 /* 5 seconds */
+
+-static atomic_t mce_paniced;
++static atomic_unchecked_t mce_paniced;
+
+ static int fake_panic;
+-static atomic_t mce_fake_paniced;
++static atomic_unchecked_t mce_fake_paniced;
+
+ /* Panic in progress. Enable interrupts and wait for final IPI */
+ static void wait_for_panic(void)
+@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ /*
+ * Make sure only one CPU runs in machine check panic
+ */
+- if (atomic_inc_return(&mce_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
+ wait_for_panic();
+ barrier();
+
+@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
+ console_verbose();
+ } else {
+ /* Don't log too much for fake panic */
+- if (atomic_inc_return(&mce_fake_paniced) > 1)
++ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
+ return;
+ }
+ print_mce_head();
+@@ -623,7 +624,7 @@ static int mce_timed_out(u64 *t)
+ * might have been modified by someone else.
+ */
+ rmb();
+- if (atomic_read(&mce_paniced))
++ if (atomic_read_unchecked(&mce_paniced))
+ wait_for_panic();
+ if (!monarch_timeout)
+ goto out;
+@@ -1401,7 +1402,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
+ }
+
+ /* Call the installed machine check handler for this CPU setup. */
+-void (*machine_check_vector)(struct pt_regs *, long error_code) =
++void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
+ unexpected_machine_check;
+
+ /*
+@@ -1423,7 +1424,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
+ return;
+ }
+
++ pax_open_kernel();
+ machine_check_vector = do_machine_check;
++ pax_close_kernel();
+
+ mce_init();
+ mce_cpu_features(c);
+@@ -1436,14 +1439,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
+ */
+
+ static DEFINE_SPINLOCK(mce_state_lock);
+-static int open_count; /* #times opened */
++static local_t open_count; /* #times opened */
+ static int open_exclu; /* already open exclusive? */
+
+ static int mce_open(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_state_lock);
+
+- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
++ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
+ spin_unlock(&mce_state_lock);
+
+ return -EBUSY;
+@@ -1451,7 +1454,7 @@ static int mce_open(struct inode *inode, struct file *file)
+
+ if (file->f_flags & O_EXCL)
+ open_exclu = 1;
+- open_count++;
++ local_inc(&open_count);
+
+ spin_unlock(&mce_state_lock);
+
+@@ -1462,7 +1465,7 @@ static int mce_release(struct inode *inode, struct file *file)
+ {
+ spin_lock(&mce_state_lock);
+
+- open_count--;
++ local_dec(&open_count);
+ open_exclu = 0;
+
+ spin_unlock(&mce_state_lock);
+@@ -2014,7 +2017,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ return NOTIFY_OK;
+ }
+
+-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
++static struct notifier_block mce_cpu_notifier __cpuinitconst = {
+ .notifier_call = mce_cpu_callback,
+ };
+
+@@ -2089,7 +2092,7 @@ struct dentry *mce_get_debugfs_dir(void)
+ static void mce_reset(void)
+ {
+ cpu_missing = 0;
+- atomic_set(&mce_fake_paniced, 0);
++ atomic_set_unchecked(&mce_fake_paniced, 0);
+ atomic_set(&mce_executing, 0);
+ atomic_set(&mce_callin, 0);
+ atomic_set(&global_nwo, 0);
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index ef3cd31..9d2f6ab 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ return ret;
+ }
+
+-static struct sysfs_ops threshold_ops = {
++static const struct sysfs_ops threshold_ops = {
+ .show = show,
+ .store = store,
+ };
+diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
+index 5c0e653..0882b0a 100644
+--- a/arch/x86/kernel/cpu/mcheck/p5.c
++++ b/arch/x86/kernel/cpu/mcheck/p5.c
+@@ -12,6 +12,7 @@
+ #include <asm/system.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* By default disabled */
+ int mce_p5_enabled __read_mostly;
+@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+ if (!cpu_has(c, X86_FEATURE_MCE))
+ return;
+
++ pax_open_kernel();
+ machine_check_vector = pentium_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
+index 54060f5..c1a7577 100644
+--- a/arch/x86/kernel/cpu/mcheck/winchip.c
++++ b/arch/x86/kernel/cpu/mcheck/winchip.c
+@@ -11,6 +11,7 @@
+ #include <asm/system.h>
+ #include <asm/mce.h>
+ #include <asm/msr.h>
++#include <asm/pgtable.h>
+
+ /* Machine check handler for WinChip C6: */
+ static void winchip_machine_check(struct pt_regs *regs, long error_code)
+@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
+ {
+ u32 lo, hi;
+
++ pax_open_kernel();
+ machine_check_vector = winchip_machine_check;
++ pax_close_kernel();
+ /* Make sure the vector pointer is visible before we enable MCEs: */
+ wmb();
+
+diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
+index 33af141..92ba9cd 100644
+--- a/arch/x86/kernel/cpu/mtrr/amd.c
++++ b/arch/x86/kernel/cpu/mtrr/amd.c
+@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
+ return 0;
+ }
+
+-static struct mtrr_ops amd_mtrr_ops = {
++static const struct mtrr_ops amd_mtrr_ops = {
+ .vendor = X86_VENDOR_AMD,
+ .set = amd_set_mtrr,
+ .get = amd_get_mtrr,
+diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
+index de89f14..316fe3e 100644
+--- a/arch/x86/kernel/cpu/mtrr/centaur.c
++++ b/arch/x86/kernel/cpu/mtrr/centaur.c
+@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
+ return 0;
+ }
+
+-static struct mtrr_ops centaur_mtrr_ops = {
++static const struct mtrr_ops centaur_mtrr_ops = {
+ .vendor = X86_VENDOR_CENTAUR,
+ .set = centaur_set_mcr,
+ .get = centaur_get_mcr,
+diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
+index 228d982..68a3343 100644
+--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
++++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
+@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
+ post_set();
+ }
+
+-static struct mtrr_ops cyrix_mtrr_ops = {
++static const struct mtrr_ops cyrix_mtrr_ops = {
+ .vendor = X86_VENDOR_CYRIX,
+ .set_all = cyrix_set_all,
+ .set = cyrix_set_arr,
+diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
+index 55da0c5..4d75584 100644
+--- a/arch/x86/kernel/cpu/mtrr/generic.c
++++ b/arch/x86/kernel/cpu/mtrr/generic.c
+@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
+ /*
+ * Generic structure...
+ */
+-struct mtrr_ops generic_mtrr_ops = {
++const struct mtrr_ops generic_mtrr_ops = {
+ .use_intel_if = 1,
+ .set_all = generic_set_all,
+ .get = generic_get_mtrr,
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index fd60f09..c94ef52 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
+ u64 size_or_mask, size_and_mask;
+ static bool mtrr_aps_delayed_init;
+
+-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
++static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
+
+-struct mtrr_ops *mtrr_if;
++const struct mtrr_ops *mtrr_if;
+
+ static void set_mtrr(unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type);
+
+-void set_mtrr_ops(struct mtrr_ops *ops)
++void set_mtrr_ops(const struct mtrr_ops *ops)
+ {
+ if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
+ mtrr_ops[ops->vendor] = ops;
+diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
+index a501dee..816c719 100644
+--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
++++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
+@@ -25,14 +25,14 @@ struct mtrr_ops {
+ int (*validate_add_page)(unsigned long base, unsigned long size,
+ unsigned int type);
+ int (*have_wrcomb)(void);
+-};
++} __do_const;
+
+ extern int generic_get_free_region(unsigned long base, unsigned long size,
+ int replace_reg);
+ extern int generic_validate_add_page(unsigned long base, unsigned long size,
+ unsigned int type);
+
+-extern struct mtrr_ops generic_mtrr_ops;
++extern const struct mtrr_ops generic_mtrr_ops;
+
+ extern int positive_have_wrcomb(void);
+
+@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
+ u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
+ void get_mtrr_state(void);
+
+-extern void set_mtrr_ops(struct mtrr_ops *ops);
++extern void set_mtrr_ops(const struct mtrr_ops *ops);
+
+ extern u64 size_or_mask, size_and_mask;
+-extern struct mtrr_ops *mtrr_if;
++extern const struct mtrr_ops *mtrr_if;
+
+ #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
+ #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 0ff02ca..9994c9d 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
+ * count to the generic event atomically:
+ */
+ again:
+- prev_raw_count = atomic64_read(&hwc->prev_count);
++ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
+ rdmsrl(hwc->event_base + idx, new_raw_count);
+
+- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
++ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+@@ -741,7 +741,7 @@ again:
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+- atomic64_add(delta, &event->count);
++ atomic64_add_unchecked(delta, &event->count);
+ atomic64_sub(delta, &hwc->period_left);
+
+ return new_raw_count;
+@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
+ * The hw event starts counting from this event offset,
+ * mark it to be able to extra future deltas:
+ */
+- atomic64_set(&hwc->prev_count, (u64)-left);
++ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
+
+ err = checking_wrmsrl(hwc->event_base + idx,
+ (u64)(-left) & x86_pmu.event_mask);
+@@ -1940,7 +1940,7 @@ perf_event_nmi_handler(struct notifier_block *self,
+ return NOTIFY_STOP;
+ }
+
+-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
++static struct notifier_block perf_event_nmi_notifier = {
+ .notifier_call = perf_event_nmi_handler,
+ .next = NULL,
+ .priority = 1
+@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+ break;
+
+ callchain_store(entry, frame.return_address);
+- fp = frame.next_frame;
++ fp = (__force const void __user *)frame.next_frame;
+ }
+ }
+
+diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
+index 898df97..9e82503 100644
+--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
++++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
+@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
+
+ /* Interface defining a CPU specific perfctr watchdog */
+ struct wd_ops {
+- int (*reserve)(void);
+- void (*unreserve)(void);
+- int (*setup)(unsigned nmi_hz);
+- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
+- void (*stop)(void);
++ int (* const reserve)(void);
++ void (* const unreserve)(void);
++ int (* const setup)(unsigned nmi_hz);
++ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
++ void (* const stop)(void);
+ unsigned perfctr;
+ unsigned evntsel;
+ u64 checkbit;
+@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
+ #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+ #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
++/* cannot be const */
+ static struct wd_ops intel_arch_wd_ops;
+
+ static int setup_intel_arch_watchdog(unsigned nmi_hz)
+@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
+ return 1;
+ }
+
++/* cannot be const */
+ static struct wd_ops intel_arch_wd_ops __read_mostly = {
+ .reserve = single_msr_reserve,
+ .unreserve = single_msr_unreserve,
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index ff95824..2ffdcb5 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
+ regs = args->regs;
+
+ #ifdef CONFIG_X86_32
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ crash_fixup_ss_esp(&fixed_regs, regs);
+ regs = &fixed_regs;
+ }
+diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
+index 37250fe..bf2ec74 100644
+--- a/arch/x86/kernel/doublefault_32.c
++++ b/arch/x86/kernel/doublefault_32.c
+@@ -11,7 +11,7 @@
+
+ #define DOUBLEFAULT_STACKSIZE (1024)
+ static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
+-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
+
+ #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
+
+@@ -21,7 +21,7 @@ static void doublefault_fn(void)
+ unsigned long gdt, tss;
+
+ store_gdt(&gdt_desc);
+- gdt = gdt_desc.address;
++ gdt = (unsigned long)gdt_desc.address;
+
+ printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
+
+@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
+ /* 0x2 bit is always set */
+ .flags = X86_EFLAGS_SF | 0x2,
+ .sp = STACK_START,
+- .es = __USER_DS,
++ .es = __KERNEL_DS,
+ .cs = __KERNEL_CS,
+ .ss = __KERNEL_DS,
+- .ds = __USER_DS,
++ .ds = __KERNEL_DS,
+ .fs = __KERNEL_PERCPU,
+
+ .__cr3 = __pa_nodebug(swapper_pg_dir),
+diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
+index 2d8a371..4fa6ae6 100644
+--- a/arch/x86/kernel/dumpstack.c
++++ b/arch/x86/kernel/dumpstack.c
+@@ -2,6 +2,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ */
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++#define __INCLUDED_BY_HIDESYM 1
++#endif
+ #include <linux/kallsyms.h>
+ #include <linux/kprobes.h>
+ #include <linux/uaccess.h>
+@@ -28,7 +31,7 @@ static int die_counter;
+
+ void printk_address(unsigned long address, int reliable)
+ {
+- printk(" [<%p>] %s%pS\n", (void *) address,
++ printk(" [<%p>] %s%pA\n", (void *) address,
+ reliable ? "" : "? ", (void *) address);
+ }
+
+@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
+ static void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ {
+- struct task_struct *task = tinfo->task;
+ unsigned long ret_addr;
+ int index = task->curr_ret_stack;
+
+@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ static inline void
+ print_ftrace_graph_addr(unsigned long addr, void *data,
+ const struct stacktrace_ops *ops,
+- struct thread_info *tinfo, int *graph)
++ struct task_struct *task, int *graph)
+ { }
+ #endif
+
+@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
+ */
+
+-static inline int valid_stack_ptr(struct thread_info *tinfo,
+- void *p, unsigned int size, void *end)
++static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
+ {
+- void *t = tinfo;
+ if (end) {
+ if (p < end && p >= (end-THREAD_SIZE))
+ return 1;
+@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
+ }
+
+ unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph)
+ {
+ struct stack_frame *frame = (struct stack_frame *)bp;
+
+- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
++ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
+ unsigned long addr;
+
+ addr = *stack;
+@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
+ } else {
+ ops->address(data, addr, 0);
+ }
+- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
++ print_ftrace_graph_addr(addr, data, ops, task, graph);
+ }
+ stack++;
+ }
+@@ -180,7 +180,7 @@ void dump_stack(void)
+ #endif
+
+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
+- current->pid, current->comm, print_tainted(),
++ task_pid_nr(current), current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
+ return flags;
+ }
+
++extern void gr_handle_kernel_exploit(void);
++
+ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ {
+ if (regs && kexec_should_crash(current))
+@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+- do_exit(signr);
++
++ gr_handle_kernel_exploit();
++
++ do_group_exit(signr);
+ }
+
+ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
+@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
+ unsigned long flags = oops_begin();
+ int sig = SIGSEGV;
+
+- if (!user_mode_vm(regs))
++ if (!user_mode(regs))
+ report_bug(regs->ip, regs);
+
+ if (__die(str, regs, err))
+diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
+index 81086c2..13e8b17 100644
+--- a/arch/x86/kernel/dumpstack.h
++++ b/arch/x86/kernel/dumpstack.h
+@@ -15,7 +15,7 @@
+ #endif
+
+ extern unsigned long
+-print_context_stack(struct thread_info *tinfo,
++print_context_stack(struct task_struct *task, void *stack_start,
+ unsigned long *stack, unsigned long bp,
+ const struct stacktrace_ops *ops, void *data,
+ unsigned long *end, int *graph);
+diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
+index f7dd2a7..c7b8ce6 100644
+--- a/arch/x86/kernel/dumpstack_32.c
++++ b/arch/x86/kernel/dumpstack_32.c
+@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ #endif
+
+ for (;;) {
+- struct thread_info *context;
++ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
+
+- context = (struct thread_info *)
+- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+- bp = print_context_stack(context, stack, bp, ops,
+- data, NULL, &graph);
+-
+- stack = (unsigned long *)context->previous_esp;
+- if (!stack)
++ if (stack_start == task_stack_page(task))
+ break;
++ stack = *(unsigned long **)stack_start;
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+ touch_nmi_watchdog();
+@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+- if (!user_mode_vm(regs)) {
++ if (!user_mode(regs)) {
+ unsigned int code_prologue = code_bytes * 43 / 64;
+ unsigned int code_len = code_bytes;
+ unsigned char c;
+ u8 *ip;
++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
+
+ printk(KERN_EMERG "Stack:\n");
+ show_stack_log_lvl(NULL, regs, &regs->sp,
+@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
+
+ printk(KERN_EMERG "Code: ");
+
+- ip = (u8 *)regs->ip - code_prologue;
++ ip = (u8 *)regs->ip - code_prologue + cs_base;
+ if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+ /* try starting at IP */
+- ip = (u8 *)regs->ip;
++ ip = (u8 *)regs->ip + cs_base;
+ code_len = code_len - code_prologue + 1;
+ }
+ for (i = 0; i < code_len; i++, ip++) {
+@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
+ printk(" Bad EIP value.");
+ break;
+ }
+- if (ip == (u8 *)regs->ip)
++ if (ip == (u8 *)regs->ip + cs_base)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
+ printk("\n");
+ }
+
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_left;
++
++ /* all kernel stacks are of the same size */
++ stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
++
+ int is_valid_bugaddr(unsigned long ip)
+ {
+ unsigned short ud2;
+
++ ip = ktla_ktva(ip);
+ if (ip < PAGE_OFFSET)
+ return 0;
+ if (probe_kernel_address((unsigned short *)ip, ud2))
+diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
+index a071e6b..1ad66d7 100644
+--- a/arch/x86/kernel/dumpstack_64.c
++++ b/arch/x86/kernel/dumpstack_64.c
+@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
+ unsigned used = 0;
+- struct thread_info *tinfo;
+ int graph = 0;
++ void *stack_start;
+
+ if (!task)
+ task = current;
+@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * current stack address. If the stacks consist of nested
+ * exceptions
+ */
+- tinfo = task_thread_info(task);
+ for (;;) {
+ char *id;
+ unsigned long *estack_end;
++
+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
+ &used, &id);
+
+@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (ops->stack(data, id) < 0)
+ break;
+
+- bp = print_context_stack(tinfo, stack, bp, ops,
++ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
+ data, estack_end, &graph);
+ ops->stack(data, "<EOE>");
+ /*
+@@ -165,6 +165,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ * second-to-last pointer (index -2 to end) in the
+ * exception stack:
+ */
++ if ((u16)estack_end[-1] != __KERNEL_DS)
++ goto out;
+ stack = (unsigned long *) estack_end[-2];
+ continue;
+ }
+@@ -176,7 +178,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ if (stack >= irq_stack && stack < irq_stack_end) {
+ if (ops->stack(data, "IRQ") < 0)
+ break;
+- bp = print_context_stack(tinfo, stack, bp,
++ bp = print_context_stack(task, irq_stack, stack, bp,
+ ops, data, irq_stack_end, &graph);
+ /*
+ * We link to the next stack (which would be
+@@ -195,7 +197,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
+ /*
+ * This handles the process stack:
+ */
+- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
++ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
++ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
++out:
+ put_cpu();
+ }
+ EXPORT_SYMBOL(dump_trace);
+@@ -248,7 +252,7 @@ void show_registers(struct pt_regs *regs)
+ {
+ int i;
+ unsigned long sp;
+- const int cpu = smp_processor_id();
++ const int cpu = raw_smp_processor_id();
+ struct task_struct *cur = current;
+
+ sp = regs->sp;
+@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
+ return ud2 == 0x0b0f;
+ }
+
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++void pax_check_alloca(unsigned long size)
++{
++ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
++ unsigned cpu, used;
++ char *id;
++
++ /* check the process stack first */
++ stack_start = (unsigned long)task_stack_page(current);
++ stack_end = stack_start + THREAD_SIZE;
++ if (likely(stack_start <= sp && sp < stack_end)) {
++ unsigned long stack_left = sp & (THREAD_SIZE - 1);
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ cpu = get_cpu();
++
++ /* check the irq stacks */
++ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
++ stack_start = stack_end - IRQ_STACK_SIZE;
++ if (stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ /* check the exception stacks */
++ used = 0;
++ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
++ stack_start = stack_end - EXCEPTION_STKSZ;
++ if (stack_end && stack_start <= sp && sp < stack_end) {
++ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
++ put_cpu();
++ BUG_ON(stack_left < 256 || size >= stack_left - 256);
++ return;
++ }
++
++ put_cpu();
++
++ /* unknown stack */
++ BUG();
++}
++EXPORT_SYMBOL(pax_check_alloca);
++#endif
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index a89739a..95e0c48 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -733,7 +733,7 @@ struct early_res {
+ };
+ static struct early_res early_res[MAX_EARLY_RES] __initdata = {
+ { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
+- {}
++ { 0, 0, {0}, 0 }
+ };
+
+ static int __init find_overlapped_early(u64 start, u64 end)
+diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
+index b9c830c..1e41a96 100644
+--- a/arch/x86/kernel/early_printk.c
++++ b/arch/x86/kernel/early_printk.c
+@@ -7,6 +7,7 @@
+ #include <linux/pci_regs.h>
+ #include <linux/pci_ids.h>
+ #include <linux/errno.h>
++#include <linux/sched.h>
+ #include <asm/io.h>
+ #include <asm/processor.h>
+ #include <asm/fcntl.h>
+@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
+ int n;
+ va_list ap;
+
++ pax_track_stack();
++
+ va_start(ap, fmt);
+ n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ early_console->write(early_console, buf, n);
+diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
+index 5cab48e..b025f9b 100644
+--- a/arch/x86/kernel/efi_32.c
++++ b/arch/x86/kernel/efi_32.c
+@@ -38,70 +38,56 @@
+ */
+
+ static unsigned long efi_rt_eflags;
+-static pgd_t efi_bak_pg_dir_pointer[2];
++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
+
+-void efi_call_phys_prelog(void)
++void __init efi_call_phys_prelog(void)
+ {
+- unsigned long cr4;
+- unsigned long temp;
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++#endif
++
+ local_irq_save(efi_rt_eflags);
+
+- /*
+- * If I don't have PAE, I should just duplicate two entries in page
+- * directory. If I have PAE, I just need to duplicate one entry in
+- * page directory.
+- */
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- swapper_pg_dir[0].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- } else {
+- efi_bak_pg_dir_pointer[0].pgd =
+- swapper_pg_dir[pgd_index(0)].pgd;
+- efi_bak_pg_dir_pointer[1].pgd =
+- swapper_pg_dir[pgd_index(0x400000)].pgd;
+- swapper_pg_dir[pgd_index(0)].pgd =
+- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
+- temp = PAGE_OFFSET + 0x400000;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- swapper_pg_dir[pgd_index(temp)].pgd;
+- }
++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+
+ /*
+ * After the lock is released, the original page table is restored.
+ */
+ __flush_tlb_all();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = __pa(get_cpu_gdt_table(0));
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+ }
+
+-void efi_call_phys_epilog(void)
++void __init efi_call_phys_epilog(void)
+ {
+- unsigned long cr4;
+ struct desc_ptr gdt_descr;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ struct desc_struct d;
++
++ memset(&d, 0, sizeof d);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
++ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
++#endif
++
+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+- cr4 = read_cr4_safe();
+-
+- if (cr4 & X86_CR4_PAE) {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- } else {
+- swapper_pg_dir[pgd_index(0)].pgd =
+- efi_bak_pg_dir_pointer[0].pgd;
+- swapper_pg_dir[pgd_index(0x400000)].pgd =
+- efi_bak_pg_dir_pointer[1].pgd;
+- }
++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
+
+ /*
+ * After the lock is released, the original page table is restored.
+diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
+index fbe66e6..eae5e38 100644
+--- a/arch/x86/kernel/efi_stub_32.S
++++ b/arch/x86/kernel/efi_stub_32.S
+@@ -6,7 +6,9 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/init.h>
+ #include <asm/page_types.h>
++#include <asm/segment.h>
+
+ /*
+ * efi_call_phys(void *, ...) is a function with variable parameters.
+@@ -20,7 +22,7 @@
+ * service functions will comply with gcc calling convention, too.
+ */
+
+-.text
++__INIT
+ ENTRY(efi_call_phys)
+ /*
+ * 0. The function can only be called in Linux kernel. So CS has been
+@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
+ * The mapping of lower virtual memory has been created in prelog and
+ * epilog.
+ */
+- movl $1f, %edx
+- subl $__PAGE_OFFSET, %edx
+- jmp *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $(__KERNEXEC_EFI_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
++ addl $2f,(1f)
++ ljmp *(1f)
++
++__INITDATA
++1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
++.previous
++
++2:
++ subl $2b,(1b)
++#else
++ jmp 1f-__PAGE_OFFSET
+ 1:
++#endif
+
+ /*
+ * 2. Now on the top of stack is the return
+@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
+ * parameter 2, ..., param n. To make things easy, we save the return
+ * address of efi_call_phys in a global variable.
+ */
+- popl %edx
+- movl %edx, saved_return_addr
+- /* get the function pointer into ECX*/
+- popl %ecx
+- movl %ecx, efi_rt_function_ptr
+- movl $2f, %edx
+- subl $__PAGE_OFFSET, %edx
+- pushl %edx
++ popl (saved_return_addr)
++ popl (efi_rt_function_ptr)
+
+ /*
+ * 3. Clear PG bit in %CR0.
+@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
+ /*
+ * 5. Call the physical function.
+ */
+- jmp *%ecx
++ call *(efi_rt_function_ptr-__PAGE_OFFSET)
+
+-2:
+ /*
+ * 6. After EFI runtime service returns, control will return to
+ * following instruction. We'd better readjust stack pointer first.
+@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+- jmp 1f
+-1:
++
+ /*
+ * 8. Now restore the virtual mode from flat mode by
+ * adding EIP with PAGE_OFFSET.
+ */
+- movl $1f, %edx
+- jmp *%edx
++#ifdef CONFIG_PAX_KERNEXEC
++ movl $(__KERNEL_DS), %edx
++ mov %edx, %ds
++ mov %edx, %es
++ mov %edx, %ss
++ ljmp $(__KERNEL_CS),$1f
++#else
++ jmp 1f+__PAGE_OFFSET
++#endif
+ 1:
+
+ /*
+ * 9. Balance the stack. And because EAX contain the return value,
+ * we'd better not clobber it.
+ */
+- leal efi_rt_function_ptr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
++ pushl (efi_rt_function_ptr)
+
+ /*
+- * 10. Push the saved return address onto the stack and return.
++ * 10. Return to the saved return address.
+ */
+- leal saved_return_addr, %edx
+- movl (%edx), %ecx
+- pushl %ecx
+- ret
++ jmpl *(saved_return_addr)
+ ENDPROC(efi_call_phys)
+ .previous
+
+-.data
++__INITDATA
+ saved_return_addr:
+ .long 0
+ efi_rt_function_ptr:
+diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
+index 4c07cca..2c8427d 100644
+--- a/arch/x86/kernel/efi_stub_64.S
++++ b/arch/x86/kernel/efi_stub_64.S
+@@ -7,6 +7,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <asm/alternative-asm.h>
+
+ #define SAVE_XMM \
+ mov %rsp, %rax; \
+@@ -40,6 +41,7 @@ ENTRY(efi_call0)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call0)
+
+@@ -50,6 +52,7 @@ ENTRY(efi_call1)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call1)
+
+@@ -60,6 +63,7 @@ ENTRY(efi_call2)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call2)
+
+@@ -71,6 +75,7 @@ ENTRY(efi_call3)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call3)
+
+@@ -83,6 +88,7 @@ ENTRY(efi_call4)
+ call *%rdi
+ addq $32, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call4)
+
+@@ -96,6 +102,7 @@ ENTRY(efi_call5)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call5)
+
+@@ -112,5 +119,6 @@ ENTRY(efi_call6)
+ call *%rdi
+ addq $48, %rsp
+ RESTORE_XMM
++ pax_force_retaddr 0, 1
+ ret
+ ENDPROC(efi_call6)
+diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
+index c097e7d..a3f1930 100644
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -95,12 +95,6 @@
+ #endif
+ .endm
+
+-#ifdef CONFIG_VM86
+-#define resume_userspace_sig check_userspace
+-#else
+-#define resume_userspace_sig resume_userspace
+-#endif
+-
+ /*
+ * User gs save/restore
+ *
+@@ -185,13 +179,153 @@
+ /*CFI_REL_OFFSET gs, PT_GS*/
+ .endm
+ .macro SET_KERNEL_GS reg
++
++#ifdef CONFIG_CC_STACKPROTECTOR
+ movl $(__KERNEL_STACK_CANARY), \reg
++#elif defined(CONFIG_PAX_MEMORY_UDEREF)
++ movl $(__USER_DS), \reg
++#else
++ xorl \reg, \reg
++#endif
++
+ movl \reg, %gs
+ .endm
+
+ #endif /* CONFIG_X86_32_LAZY_GS */
+
+-.macro SAVE_ALL
++.macro pax_enter_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++.endm
++
++.macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++.endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ bts $16, %esi
++ jnc 1f
++ mov %cs, %esi
++ cmp $__KERNEL_CS, %esi
++ jz 3f
++ ljmp $__KERNEL_CS, $3f
++1: ljmp $__KERNEXEC_KERNEL_CS, $2f
++2:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
++#else
++ mov %esi, %cr0
++#endif
++3:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++#ifdef CONFIG_PARAVIRT
++ pushl %eax
++ pushl %ecx
++#endif
++ mov %cs, %esi
++ cmp $__KERNEXEC_KERNEL_CS, %esi
++ jnz 2f
++#ifdef CONFIG_PARAVIRT
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
++ mov %eax, %esi
++#else
++ mov %cr0, %esi
++#endif
++ btr $16, %esi
++ ljmp $__KERNEL_CS, $1f
++1:
++#ifdef CONFIG_PARAVIRT
++ mov %esi, %eax
++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
++#else
++ mov %esi, %cr0
++#endif
++2:
++#ifdef CONFIG_PARAVIRT
++ popl %ecx
++ popl %eax
++#endif
++ ret
++ENDPROC(pax_exit_kernel)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++/*
++ * ebp: thread_info
++ */
++ENTRY(pax_erase_kstack)
++ pushl %edi
++ pushl %ecx
++ pushl %eax
++
++ mov TI_lowest_stack(%ebp), %edi
++ mov $-0xBEEF, %eax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $2, %ecx
++ repne scasl
++ jecxz 2f
++
++ cmp $2*16, %ecx
++ jc 2f
++
++ mov $2*16, %ecx
++ repe scasl
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %ecx
++ jb 3f
++ ud2
++3:
++
++ shr $2, %ecx
++ rep stosl
++
++ mov TI_task_thread_sp0(%ebp), %edi
++ sub $128, %edi
++ mov %edi, TI_lowest_stack(%ebp)
++
++ popl %eax
++ popl %ecx
++ popl %edi
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
++
++.macro __SAVE_ALL _DS
+ cld
+ PUSH_GS
+ pushl %fs
+@@ -224,7 +358,7 @@
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ebx, 0
+- movl $(__USER_DS), %edx
++ movl $\_DS, %edx
+ movl %edx, %ds
+ movl %edx, %es
+ movl $(__KERNEL_PERCPU), %edx
+@@ -232,6 +366,15 @@
+ SET_KERNEL_GS %edx
+ .endm
+
++.macro SAVE_ALL
++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
++ __SAVE_ALL __KERNEL_DS
++ pax_enter_kernel
++#else
++ __SAVE_ALL __USER_DS
++#endif
++.endm
++
+ .macro RESTORE_INT_REGS
+ popl %ebx
+ CFI_ADJUST_CFA_OFFSET -4
+@@ -331,7 +474,7 @@ ENTRY(ret_from_fork)
+ CFI_ADJUST_CFA_OFFSET -4
+ jmp syscall_exit
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * Return to user mode is not as complex as all this looks,
+@@ -347,12 +490,29 @@ ret_from_exception:
+ preempt_stop(CLBR_ANY)
+ ret_from_intr:
+ GET_THREAD_INFO(%ebp)
+-check_userspace:
++resume_userspace_sig:
++#ifdef CONFIG_VM86
+ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb PT_CS(%esp), %al
+ andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
++#else
++ /*
++ * We can be coming here from a syscall done in the kernel space,
++ * e.g. a failed kernel_execve().
++ */
++ movl PT_CS(%esp), %eax
++ andl $SEGMENT_RPL_MASK, %eax
++#endif
+ cmpl $USER_RPL, %eax
++
++#ifdef CONFIG_PAX_KERNEXEC
++ jae resume_userspace
++
++ pax_exit_kernel
++ jmp resume_kernel
++#else
+ jb resume_kernel # not returning to v8086 or userspace
++#endif
+
+ ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
+@@ -364,8 +524,8 @@ ENTRY(resume_userspace)
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+- jmp restore_all
+-END(ret_from_exception)
++ jmp restore_all_pax
++ENDPROC(ret_from_exception)
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+@@ -380,7 +540,7 @@ need_resched:
+ jz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+-END(resume_kernel)
++ENDPROC(resume_kernel)
+ #endif
+ CFI_ENDPROC
+
+@@ -414,25 +574,36 @@ sysenter_past_esp:
+ /*CFI_REL_OFFSET cs, 0*/
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+- * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ pushl $0
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET eip, 0
+
+ pushl %eax
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl TI_sysenter_return(%ebp),%ebp
++ movl %ebp,PT_EIP(%esp)
+ ENABLE_INTERRUPTS(CLBR_NONE)
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
++ movl PT_OLDESP(%esp),%ebp
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ mov PT_OLDSS(%esp),%ds
++1: movl %ds:(%ebp),%ebp
++ push %ss
++ pop %ds
++#else
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
++#endif
++
+ movl %ebp,PT_EBP(%esp)
+ .section __ex_table,"a"
+ .align 4
+@@ -441,6 +612,10 @@ sysenter_past_esp:
+
+ GET_THREAD_INFO(%ebp)
+
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz sysenter_audit
+ sysenter_do_call:
+@@ -455,12 +630,24 @@ sysenter_do_call:
+ testl $_TIF_ALLWORK_MASK, %ecx
+ jne sysexit_audit
+ sysenter_exit:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushl_cfi %eax
++ movl %esp, %eax
++ call pax_randomize_kstack
++ popl_cfi %eax
++#endif
++
++ pax_erase_kstack
++
+ /* if something modifies registers it must also disable sysexit */
+ movl PT_EIP(%esp), %edx
+ movl PT_OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ TRACE_IRQS_ON
+ 1: mov PT_FS(%esp), %fs
++2: mov PT_DS(%esp), %ds
++3: mov PT_ES(%esp), %es
+ PTGS_TO_GS
+ ENABLE_INTERRUPTS_SYSEXIT
+
+@@ -477,6 +664,9 @@ sysenter_audit:
+ movl %eax,%edx /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ pushl %ebx
+ CFI_ADJUST_CFA_OFFSET 4
+ movl PT_EAX(%esp),%eax /* reload syscall number */
+@@ -504,11 +694,17 @@ sysexit_audit:
+
+ CFI_ENDPROC
+ .pushsection .fixup,"ax"
+-2: movl $0,PT_FS(%esp)
++4: movl $0,PT_FS(%esp)
++ jmp 1b
++5: movl $0,PT_DS(%esp)
++ jmp 1b
++6: movl $0,PT_ES(%esp)
+ jmp 1b
+ .section __ex_table,"a"
+ .align 4
+- .long 1b,2b
++ .long 1b,4b
++ .long 2b,5b
++ .long 3b,6b
+ .popsection
+ PTGS_TO_GS_EX
+ ENDPROC(ia32_sysenter_target)
+@@ -520,6 +716,11 @@ ENTRY(system_call)
+ CFI_ADJUST_CFA_OFFSET 4
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ # system call tracing in operation / emulation
+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+ jnz syscall_trace_entry
+@@ -538,6 +739,15 @@ syscall_exit:
+ testl $_TIF_ALLWORK_MASK, %ecx # current->work
+ jne syscall_exit_work
+
++restore_all_pax:
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ movl %esp, %eax
++ call pax_randomize_kstack
++#endif
++
++ pax_erase_kstack
++
+ restore_all:
+ TRACE_IRQS_IRET
+ restore_all_notrace:
+@@ -602,10 +812,29 @@ ldt_ss:
+ mov PT_OLDESP(%esp), %eax /* load userspace esp */
+ mov %dx, %ax /* eax: new kernel esp */
+ sub %eax, %edx /* offset (low word is 0) */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ shr $16, %edx
++
++#ifdef CONFIG_PAX_KERNEXEC
++ mov %cr0, %esi
++ btr $16, %esi
++ mov %esi, %cr0
++#endif
++
+ mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
+ mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ bts $16, %esi
++ mov %esi, %cr0
++#endif
++
+ pushl $__ESPFIX_SS
+ CFI_ADJUST_CFA_OFFSET 4
+ push %eax /* new kernel esp */
+@@ -636,36 +865,30 @@ work_resched:
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+- jz restore_all
++ jz restore_all_pax
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
++ movl %esp, %eax
+ #ifdef CONFIG_VM86
+ testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+- movl %esp, %eax
+- jne work_notifysig_v86 # returning to kernel-space or
++ jz 1f # returning to kernel-space or
+ # vm86-space
+- xorl %edx, %edx
+- call do_notify_resume
+- jmp resume_userspace_sig
+
+- ALIGN
+-work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ CFI_ADJUST_CFA_OFFSET 4
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ movl %eax, %esp
+-#else
+- movl %esp, %eax
++1:
+ #endif
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp resume_userspace_sig
+-END(work_pending)
++ENDPROC(work_pending)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -673,11 +896,14 @@ syscall_trace_entry:
+ movl $-ENOSYS,PT_EAX(%esp)
+ movl %esp, %eax
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /* What it returned is what we'll actually use. */
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+-END(syscall_trace_entry)
++ENDPROC(syscall_trace_entry)
+
+ # perform syscall exit tracing
+ ALIGN
+@@ -690,20 +916,24 @@ syscall_exit_work:
+ movl %esp, %eax
+ call syscall_trace_leave
+ jmp resume_userspace
+-END(syscall_exit_work)
++ENDPROC(syscall_exit_work)
+ CFI_ENDPROC
+
+ RING0_INT_FRAME # can't unwind into user space anyway
+ syscall_fault:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ push %ss
++ pop %ds
++#endif
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_fault)
++ENDPROC(syscall_fault)
+
+ syscall_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp resume_userspace
+-END(syscall_badsys)
++ENDPROC(syscall_badsys)
+ CFI_ENDPROC
+
+ /*
+@@ -726,6 +956,33 @@ PTREGSCALL(rt_sigreturn)
+ PTREGSCALL(vm86)
+ PTREGSCALL(vm86old)
+
++ ALIGN;
++ENTRY(kernel_execve)
++ push %ebp
++ sub $PT_OLDSS+4,%esp
++ push %edi
++ push %ecx
++ push %eax
++ lea 3*4(%esp),%edi
++ mov $PT_OLDSS/4+1,%ecx
++ xorl %eax,%eax
++ rep stosl
++ pop %eax
++ pop %ecx
++ pop %edi
++ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
++ mov %eax,PT_EBX(%esp)
++ mov %edx,PT_ECX(%esp)
++ mov %ecx,PT_EDX(%esp)
++ mov %esp,%eax
++ call sys_execve
++ GET_THREAD_INFO(%ebp)
++ test %eax,%eax
++ jz syscall_exit
++ add $PT_OLDSS+4,%esp
++ pop %ebp
++ ret
++
+ .macro FIXUP_ESPFIX_STACK
+ /*
+ * Switch back for ESPFIX stack to the normal zerobased stack
+@@ -735,7 +992,13 @@ PTREGSCALL(vm86old)
+ * normal stack and adjusts ESP with the matching offset.
+ */
+ /* fixup the stack */
+- PER_CPU(gdt_page, %ebx)
++#ifdef CONFIG_SMP
++ movl PER_CPU_VAR(cpu_number), %ebx
++ shll $PAGE_SHIFT_asm, %ebx
++ addl $cpu_gdt_table, %ebx
++#else
++ movl $cpu_gdt_table, %ebx
++#endif
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
+ mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
+ shl $16, %eax
+@@ -793,7 +1056,7 @@ vector=vector+1
+ .endr
+ 2: jmp common_interrupt
+ .endr
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -840,7 +1103,7 @@ ENTRY(coprocessor_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_error)
++ENDPROC(coprocessor_error)
+
+ ENTRY(simd_coprocessor_error)
+ RING0_INT_FRAME
+@@ -850,7 +1113,7 @@ ENTRY(simd_coprocessor_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(simd_coprocessor_error)
++ENDPROC(simd_coprocessor_error)
+
+ ENTRY(device_not_available)
+ RING0_INT_FRAME
+@@ -860,7 +1123,7 @@ ENTRY(device_not_available)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(device_not_available)
++ENDPROC(device_not_available)
+
+ #ifdef CONFIG_PARAVIRT
+ ENTRY(native_iret)
+@@ -869,12 +1132,12 @@ ENTRY(native_iret)
+ .align 4
+ .long native_iret, iret_exc
+ .previous
+-END(native_iret)
++ENDPROC(native_iret)
+
+ ENTRY(native_irq_enable_sysexit)
+ sti
+ sysexit
+-END(native_irq_enable_sysexit)
++ENDPROC(native_irq_enable_sysexit)
+ #endif
+
+ ENTRY(overflow)
+@@ -885,7 +1148,7 @@ ENTRY(overflow)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(overflow)
++ENDPROC(overflow)
+
+ ENTRY(bounds)
+ RING0_INT_FRAME
+@@ -895,7 +1158,7 @@ ENTRY(bounds)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(bounds)
++ENDPROC(bounds)
+
+ ENTRY(invalid_op)
+ RING0_INT_FRAME
+@@ -905,7 +1168,7 @@ ENTRY(invalid_op)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_op)
++ENDPROC(invalid_op)
+
+ ENTRY(coprocessor_segment_overrun)
+ RING0_INT_FRAME
+@@ -915,7 +1178,7 @@ ENTRY(coprocessor_segment_overrun)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(coprocessor_segment_overrun)
++ENDPROC(coprocessor_segment_overrun)
+
+ ENTRY(invalid_TSS)
+ RING0_EC_FRAME
+@@ -923,7 +1186,7 @@ ENTRY(invalid_TSS)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(invalid_TSS)
++ENDPROC(invalid_TSS)
+
+ ENTRY(segment_not_present)
+ RING0_EC_FRAME
+@@ -931,7 +1194,7 @@ ENTRY(segment_not_present)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(segment_not_present)
++ENDPROC(segment_not_present)
+
+ ENTRY(stack_segment)
+ RING0_EC_FRAME
+@@ -939,7 +1202,7 @@ ENTRY(stack_segment)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(stack_segment)
++ENDPROC(stack_segment)
+
+ ENTRY(alignment_check)
+ RING0_EC_FRAME
+@@ -947,7 +1210,7 @@ ENTRY(alignment_check)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(alignment_check)
++ENDPROC(alignment_check)
+
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+@@ -957,7 +1220,7 @@ ENTRY(divide_error)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(divide_error)
++ENDPROC(divide_error)
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+@@ -968,7 +1231,7 @@ ENTRY(machine_check)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(machine_check)
++ENDPROC(machine_check)
+ #endif
+
+ ENTRY(spurious_interrupt_bug)
+@@ -979,7 +1242,7 @@ ENTRY(spurious_interrupt_bug)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(spurious_interrupt_bug)
++ENDPROC(spurious_interrupt_bug)
+
+ ENTRY(kernel_thread_helper)
+ pushl $0 # fake return address for unwinder
+@@ -1095,7 +1358,7 @@ ENDPROC(xen_failsafe_callback)
+
+ ENTRY(mcount)
+ ret
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -1124,7 +1387,7 @@ ftrace_graph_call:
+ .globl ftrace_stub
+ ftrace_stub:
+ ret
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+
+@@ -1160,7 +1423,7 @@ trace:
+ popl %ecx
+ popl %eax
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -1181,7 +1444,7 @@ ENTRY(ftrace_graph_caller)
+ popl %ecx
+ popl %eax
+ ret
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ .globl return_to_handler
+ return_to_handler:
+@@ -1198,7 +1461,6 @@ return_to_handler:
+ ret
+ #endif
+
+-.section .rodata,"a"
+ #include "syscall_table_32.S"
+
+ syscall_table_size=(.-sys_call_table)
+@@ -1255,15 +1517,18 @@ error_code:
+ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
+ REG_TO_PTGS %ecx
+ SET_KERNEL_GS %ecx
+- movl $(__USER_DS), %ecx
++ movl $(__KERNEL_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
++
++ pax_enter_kernel
++
+ TRACE_IRQS_OFF
+ movl %esp,%eax # pt_regs pointer
+ call *%edi
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(page_fault)
++ENDPROC(page_fault)
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+@@ -1309,7 +1574,7 @@ debug_stack_correct:
+ call do_debug
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(debug)
++ENDPROC(debug)
+
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+@@ -1351,6 +1616,9 @@ nmi_stack_correct:
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
++
++ pax_exit_kernel
++
+ jmp restore_all_notrace
+ CFI_ENDPROC
+
+@@ -1391,12 +1659,15 @@ nmi_espfix_stack:
+ FIXUP_ESPFIX_STACK # %eax == %esp
+ xorl %edx,%edx # zero error code
+ call do_nmi
++
++ pax_exit_kernel
++
+ RESTORE_REGS
+ lss 12+4(%esp), %esp # back to espfix stack
+ CFI_ADJUST_CFA_OFFSET -24
+ jmp irq_return
+ CFI_ENDPROC
+-END(nmi)
++ENDPROC(nmi)
+
+ ENTRY(int3)
+ RING0_INT_FRAME
+@@ -1409,7 +1680,7 @@ ENTRY(int3)
+ call do_int3
+ jmp ret_from_exception
+ CFI_ENDPROC
+-END(int3)
++ENDPROC(int3)
+
+ ENTRY(general_protection)
+ RING0_EC_FRAME
+@@ -1417,7 +1688,7 @@ ENTRY(general_protection)
+ CFI_ADJUST_CFA_OFFSET 4
+ jmp error_code
+ CFI_ENDPROC
+-END(general_protection)
++ENDPROC(general_protection)
+
+ /*
+ * End of kprobes section
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 34a56a9..7da97ae 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -53,6 +53,8 @@
+ #include <asm/paravirt.h>
+ #include <asm/ftrace.h>
+ #include <asm/percpu.h>
++#include <asm/pgtable.h>
++#include <asm/alternative-asm.h>
+
+ /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
+ #include <linux/elf-em.h>
+@@ -64,8 +66,9 @@
+ #ifdef CONFIG_FUNCTION_TRACER
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ ENTRY(mcount)
++ pax_force_retaddr
+ retq
+-END(mcount)
++ENDPROC(mcount)
+
+ ENTRY(ftrace_caller)
+ cmpl $0, function_trace_stop
+@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+-END(ftrace_caller)
++ENDPROC(ftrace_caller)
+
+ #else /* ! CONFIG_DYNAMIC_FTRACE */
+ ENTRY(mcount)
+@@ -108,6 +112,7 @@ ENTRY(mcount)
+ #endif
+
+ GLOBAL(ftrace_stub)
++ pax_force_retaddr
+ retq
+
+ trace:
+@@ -117,12 +122,13 @@ trace:
+ movq 8(%rbp), %rsi
+ subq $MCOUNT_INSN_SIZE, %rdi
+
++ pax_force_fptr ftrace_trace_function
+ call *ftrace_trace_function
+
+ MCOUNT_RESTORE_FRAME
+
+ jmp ftrace_stub
+-END(mcount)
++ENDPROC(mcount)
+ #endif /* CONFIG_DYNAMIC_FTRACE */
+ #endif /* CONFIG_FUNCTION_TRACER */
+
+@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
+
+ MCOUNT_RESTORE_FRAME
+
++ pax_force_retaddr
+ retq
+-END(ftrace_graph_caller)
++ENDPROC(ftrace_graph_caller)
+
+ GLOBAL(return_to_handler)
+ subq $24, %rsp
+@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+ addq $16, %rsp
++ pax_force_retaddr
+ retq
+ #endif
+
+@@ -174,6 +182,273 @@ ENTRY(native_usergs_sysret64)
+ ENDPROC(native_usergs_sysret64)
+ #endif /* CONFIG_PARAVIRT */
+
++ .macro ljmpq sel, off
++#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
++ .byte 0x48; ljmp *1234f(%rip)
++ .pushsection .rodata
++ .align 16
++ 1234: .quad \off; .word \sel
++ .popsection
++#else
++ pushq $\sel
++ pushq $\off
++ lretq
++#endif
++ .endm
++
++ .macro pax_enter_kernel
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_enter_kernel
++#endif
++ .endm
++
++ .macro pax_exit_kernel
++#ifdef CONFIG_PAX_KERNEXEC
++ call pax_exit_kernel
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(pax_enter_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ jnc 3f
++ mov %cs,%edi
++ cmp $__KERNEL_CS,%edi
++ jnz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: ljmpq __KERNEL_CS,1f
++3: ljmpq __KERNEXEC_KERNEL_CS,4f
++4: SET_RDI_INTO_CR0
++ jmp 1b
++ENDPROC(pax_enter_kernel)
++
++ENTRY(pax_exit_kernel)
++ pushq %rdi
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ mov %cs,%rdi
++ cmp $__KERNEXEC_KERNEL_CS,%edi
++ jz 2f
++1:
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI);
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++
++2: GET_CR0_INTO_RDI
++ btr $16,%rdi
++ ljmpq __KERNEL_CS,3f
++3: SET_RDI_INTO_CR0
++ jmp 1b
++ENDPROC(pax_exit_kernel)
++#endif
++
++ .macro pax_enter_kernel_user
++ pax_set_fptr_mask
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_enter_kernel_user
++#endif
++ .endm
++
++ .macro pax_exit_kernel_user
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ call pax_exit_kernel_user
++#endif
++#ifdef CONFIG_PAX_RANDKSTACK
++ pushq %rax
++ call pax_randomize_kstack
++ popq %rax
++#endif
++ .endm
++
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ENTRY(pax_enter_kernel_user)
++ pushq %rdi
++ pushq %rbx
++
++#ifdef CONFIG_PARAVIRT
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++ GET_CR3_INTO_RDI
++ mov %rdi,%rbx
++ add $__START_KERNEL_map,%rbx
++ sub phys_base(%rip),%rbx
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rdi
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0,i*8(%rbx)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: popq %rdi
++#endif
++ SET_RDI_INTO_CR3
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ bts $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ PV_RESTORE_REGS(CLBR_RDI)
++#endif
++
++ popq %rbx
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_enter_kernel_user)
++
++ENTRY(pax_exit_kernel_user)
++ push %rdi
++
++#ifdef CONFIG_PARAVIRT
++ pushq %rbx
++ PV_SAVE_REGS(CLBR_RDI)
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ GET_CR0_INTO_RDI
++ btr $16,%rdi
++ SET_RDI_INTO_CR0
++#endif
++
++ GET_CR3_INTO_RDI
++ add $__START_KERNEL_map,%rdi
++ sub phys_base(%rip),%rdi
++
++#ifdef CONFIG_PARAVIRT
++ cmpl $0, pv_info+PARAVIRT_enabled
++ jz 1f
++ mov %rdi,%rbx
++ i = 0
++ .rept USER_PGD_PTRS
++ mov i*8(%rbx),%rsi
++ mov $0x67,%sil
++ lea i*8(%rbx),%rdi
++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
++ i = i + 1
++ .endr
++ jmp 2f
++1:
++#endif
++
++ i = 0
++ .rept USER_PGD_PTRS
++ movb $0x67,i*8(%rdi)
++ i = i + 1
++ .endr
++
++#ifdef CONFIG_PARAVIRT
++2: PV_RESTORE_REGS(CLBR_RDI)
++ popq %rbx
++#endif
++
++ popq %rdi
++ pax_force_retaddr
++ retq
++ENDPROC(pax_exit_kernel_user)
++#endif
++
++.macro pax_erase_kstack
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ call pax_erase_kstack
++#endif
++.endm
++
++#ifdef CONFIG_PAX_MEMORY_STACKLEAK
++ENTRY(pax_erase_kstack)
++ pushq %rdi
++ pushq %rcx
++ pushq %rax
++ pushq %r11
++
++ GET_THREAD_INFO(%r11)
++ mov TI_lowest_stack(%r11), %rdi
++ mov $-0xBEEF, %rax
++ std
++
++1: mov %edi, %ecx
++ and $THREAD_SIZE_asm - 1, %ecx
++ shr $3, %ecx
++ repne scasq
++ jecxz 2f
++
++ cmp $2*8, %ecx
++ jc 2f
++
++ mov $2*8, %ecx
++ repe scasq
++ jecxz 2f
++ jne 1b
++
++2: cld
++ mov %esp, %ecx
++ sub %edi, %ecx
++
++ cmp $THREAD_SIZE_asm, %rcx
++ jb 3f
++ ud2
++3:
++
++ shr $3, %ecx
++ rep stosq
++
++ mov TI_task_thread_sp0(%r11), %rdi
++ sub $256, %rdi
++ mov %rdi, TI_lowest_stack(%r11)
++
++ popq %r11
++ popq %rax
++ popq %rcx
++ popq %rdi
++ pax_force_retaddr
++ ret
++ENDPROC(pax_erase_kstack)
++#endif
+
+ .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
+ #ifdef CONFIG_TRACE_IRQFLAGS
+@@ -233,8 +508,8 @@ ENDPROC(native_usergs_sysret64)
+ .endm
+
+ .macro UNFAKE_STACK_FRAME
+- addq $8*6, %rsp
+- CFI_ADJUST_CFA_OFFSET -(6*8)
++ addq $8*6 + ARG_SKIP, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
+ .endm
+
+ /*
+@@ -317,7 +592,7 @@ ENTRY(save_args)
+ leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
+ movq_cfi rbp, 8 /* push %rbp */
+ leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
+- testl $3, CS(%rdi)
++ testb $3, CS(%rdi)
+ je 1f
+ SWAPGS
+ /*
+@@ -337,9 +612,10 @@ ENTRY(save_args)
+ * We entered an interrupt context - irqs are off:
+ */
+ 2: TRACE_IRQS_OFF
++ pax_force_retaddr_bts
+ ret
+ CFI_ENDPROC
+-END(save_args)
++ENDPROC(save_args)
+
+ ENTRY(save_rest)
+ PARTIAL_FRAME 1 REST_SKIP+8
+@@ -352,9 +628,10 @@ ENTRY(save_rest)
+ movq_cfi r15, R15+16
+ movq %r11, 8(%rsp) /* return address */
+ FIXUP_TOP_OF_STACK %r11, 16
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(save_rest)
++ENDPROC(save_rest)
+
+ /* save complete stack frame */
+ .pushsection .kprobes.text, "ax"
+@@ -383,9 +660,10 @@ ENTRY(save_paranoid)
+ js 1f /* negative -> in kernel */
+ SWAPGS
+ xorl %ebx,%ebx
+-1: ret
++1: pax_force_retaddr_bts
++ ret
+ CFI_ENDPROC
+-END(save_paranoid)
++ENDPROC(save_paranoid)
+ .popsection
+
+ /*
+@@ -409,7 +687,7 @@ ENTRY(ret_from_fork)
+
+ RESTORE_REST
+
+- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
+ je int_ret_from_sys_call
+
+ testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
+@@ -419,7 +697,7 @@ ENTRY(ret_from_fork)
+ jmp ret_from_sys_call # go to the SYSRET fastpath
+
+ CFI_ENDPROC
+-END(ret_from_fork)
++ENDPROC(ret_from_fork)
+
+ /*
+ * System call entry. Upto 6 arguments in registers are supported.
+@@ -455,7 +733,7 @@ END(ret_from_fork)
+ ENTRY(system_call)
+ CFI_STARTPROC simple
+ CFI_SIGNAL_FRAME
+- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
++ CFI_DEF_CFA rsp,0
+ CFI_REGISTER rip,rcx
+ /*CFI_REGISTER rflags,r11*/
+ SWAPGS_UNSAFE_STACK
+@@ -468,12 +746,18 @@ ENTRY(system_call_after_swapgs)
+
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
++ SAVE_ARGS 8*6,1
++ pax_enter_kernel_user
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ pax_erase_kstack
++#endif
++
+ /*
+ * No need to follow this irqs off/on section - it's straight
+ * and short:
+ */
+ ENABLE_INTERRUPTS(CLBR_NONE)
+- SAVE_ARGS 8,1
+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
+ movq %rcx,RIP-ARGOFFSET(%rsp)
+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
+@@ -483,7 +767,7 @@ ENTRY(system_call_after_swapgs)
+ system_call_fastpath:
+ cmpq $__NR_syscall_max,%rax
+ ja badsys
+- movq %r10,%rcx
++ movq R10-ARGOFFSET(%rsp),%rcx
+ call *sys_call_table(,%rax,8) # XXX: rip relative
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /*
+@@ -502,6 +786,8 @@ sysret_check:
+ andl %edi,%edx
+ jnz sysret_careful
+ CFI_REMEMBER_STATE
++ pax_exit_kernel_user
++ pax_erase_kstack
+ /*
+ * sysretq will re-enable interrupts:
+ */
+@@ -555,14 +841,18 @@ badsys:
+ * jump back to the normal fast path.
+ */
+ auditsys:
+- movq %r10,%r9 /* 6th arg: 4th syscall arg */
++ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
+ movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
+ movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
+ movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
+ movq %rax,%rsi /* 2nd arg: syscall number */
+ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
+ call audit_syscall_entry
++
++ pax_erase_kstack
++
+ LOAD_ARGS 0 /* reload call-clobbered registers */
++ pax_set_fptr_mask
+ jmp system_call_fastpath
+
+ /*
+@@ -592,16 +882,20 @@ tracesys:
+ FIXUP_TOP_OF_STACK %rdi
+ movq %rsp,%rdi
+ call syscall_trace_enter
++
++ pax_erase_kstack
++
+ /*
+ * Reload arg registers from stack in case ptrace changed them.
+ * We don't reload %rax because syscall_trace_enter() returned
+ * the value it wants us to use in the table lookup.
+ */
+ LOAD_ARGS ARGOFFSET, 1
++ pax_set_fptr_mask
+ RESTORE_REST
+ cmpq $__NR_syscall_max,%rax
+ ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
+- movq %r10,%rcx /* fixup for C */
++ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
+ call *sys_call_table(,%rax,8)
+ movq %rax,RAX-ARGOFFSET(%rsp)
+ /* Use IRET because user could have changed frame */
+@@ -613,7 +907,7 @@ tracesys:
+ GLOBAL(int_ret_from_sys_call)
+ DISABLE_INTERRUPTS(CLBR_NONE)
+ TRACE_IRQS_OFF
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_restore_args
+ movl $_TIF_ALLWORK_MASK,%edi
+ /* edi: mask to check */
+@@ -624,7 +918,9 @@ GLOBAL(int_with_check)
+ andl %edi,%edx
+ jnz int_careful
+ andl $~TS_COMPAT,TI_status(%rcx)
+- jmp retint_swapgs
++ pax_exit_kernel_user
++ pax_erase_kstack
++ jmp retint_swapgs_pax
+
+ /* Either reschedule or signal or syscall exit tracking needed. */
+ /* First do a reschedule test. */
+@@ -674,7 +970,7 @@ int_restore_rest:
+ TRACE_IRQS_OFF
+ jmp int_with_check
+ CFI_ENDPROC
+-END(system_call)
++ENDPROC(system_call)
+
+ /*
+ * Certain special system calls that need to save a complete full stack frame.
+@@ -690,7 +986,7 @@ ENTRY(\label)
+ call \func
+ jmp ptregscall_common
+ CFI_ENDPROC
+-END(\label)
++ENDPROC(\label)
+ .endm
+
+ PTREGSCALL stub_clone, sys_clone, %r8
+@@ -708,9 +1004,10 @@ ENTRY(ptregscall_common)
+ movq_cfi_restore R12+8, r12
+ movq_cfi_restore RBP+8, rbp
+ movq_cfi_restore RBX+8, rbx
++ pax_force_retaddr
+ ret $REST_SKIP /* pop extended registers */
+ CFI_ENDPROC
+-END(ptregscall_common)
++ENDPROC(ptregscall_common)
+
+ ENTRY(stub_execve)
+ CFI_STARTPROC
+@@ -726,7 +1023,7 @@ ENTRY(stub_execve)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_execve)
++ENDPROC(stub_execve)
+
+ /*
+ * sigreturn is special because it needs to restore all registers on return.
+@@ -744,7 +1041,7 @@ ENTRY(stub_rt_sigreturn)
+ RESTORE_REST
+ jmp int_ret_from_sys_call
+ CFI_ENDPROC
+-END(stub_rt_sigreturn)
++ENDPROC(stub_rt_sigreturn)
+
+ /*
+ * Build the entry stubs and pointer table with some assembler magic.
+@@ -780,7 +1077,7 @@ vector=vector+1
+ 2: jmp common_interrupt
+ .endr
+ CFI_ENDPROC
+-END(irq_entries_start)
++ENDPROC(irq_entries_start)
+
+ .previous
+ END(interrupt)
+@@ -800,6 +1097,16 @@ END(interrupt)
+ CFI_ADJUST_CFA_OFFSET 10*8
+ call save_args
+ PARTIAL_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rdi)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ call \func
+ .endm
+
+@@ -822,7 +1129,7 @@ ret_from_intr:
+ CFI_ADJUST_CFA_OFFSET -8
+ exit_intr:
+ GET_THREAD_INFO(%rcx)
+- testl $3,CS-ARGOFFSET(%rsp)
++ testb $3,CS-ARGOFFSET(%rsp)
+ je retint_kernel
+
+ /* Interrupt came from user space */
+@@ -844,12 +1151,16 @@ retint_swapgs: /* return to user-space */
+ * The iretq could re-enable interrupts:
+ */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel_user
++retint_swapgs_pax:
+ TRACE_IRQS_IRETQ
+ SWAPGS
+ jmp restore_args
+
+ retint_restore_args: /* return to kernel space */
+ DISABLE_INTERRUPTS(CLBR_ANY)
++ pax_exit_kernel
++ pax_force_retaddr (RIP-ARGOFFSET)
+ /*
+ * The iretq could re-enable interrupts:
+ */
+@@ -940,7 +1251,7 @@ ENTRY(retint_kernel)
+ #endif
+
+ CFI_ENDPROC
+-END(common_interrupt)
++ENDPROC(common_interrupt)
+
+ /*
+ * APIC interrupts.
+@@ -953,7 +1264,7 @@ ENTRY(\sym)
+ interrupt \do_sym
+ jmp ret_from_intr
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ #ifdef CONFIG_SMP
+@@ -1032,12 +1343,22 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET 15*8
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro paranoidzeroentry sym do_sym
+@@ -1049,12 +1370,22 @@ ENTRY(\sym)
+ subq $15*8, %rsp
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro paranoidzeroentry_ist sym do_sym ist
+@@ -1066,15 +1397,30 @@ ENTRY(\sym)
+ subq $15*8, %rsp
+ call save_paranoid
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ xorl %esi,%esi /* no error code */
+- PER_CPU(init_tss, %rbp)
++#ifdef CONFIG_SMP
++ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
++ lea init_tss(%rbp), %rbp
++#else
++ lea init_tss(%rip), %rbp
++#endif
+ subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ call \do_sym
+ addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ .macro errorentry sym do_sym
+@@ -1085,13 +1431,23 @@ ENTRY(\sym)
+ CFI_ADJUST_CFA_OFFSET 15*8
+ call error_entry
+ DEFAULT_FRAME 0
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp error_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ /* error code is on the stack already */
+@@ -1104,13 +1460,23 @@ ENTRY(\sym)
+ call save_paranoid
+ DEFAULT_FRAME 0
+ TRACE_IRQS_OFF
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ testb $3, CS(%rsp)
++ jnz 1f
++ pax_enter_kernel
++ jmp 2f
++1: pax_enter_kernel_user
++2:
++#else
++ pax_enter_kernel
++#endif
+ movq %rsp,%rdi /* pt_regs pointer */
+ movq ORIG_RAX(%rsp),%rsi /* get error code */
+ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
+ call \do_sym
+ jmp paranoid_exit /* %ebx: no swapgs flag */
+ CFI_ENDPROC
+-END(\sym)
++ENDPROC(\sym)
+ .endm
+
+ zeroentry divide_error do_divide_error
+@@ -1141,9 +1507,10 @@ gs_change:
+ SWAPGS
+ popf
+ CFI_ADJUST_CFA_OFFSET -8
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(native_load_gs_index)
++ENDPROC(native_load_gs_index)
+
+ .section __ex_table,"a"
+ .align 8
+@@ -1193,11 +1560,12 @@ ENTRY(kernel_thread)
+ * of hacks for example to fork off the per-CPU idle tasks.
+ * [Hopefully no generic code relies on the reschedule -AK]
+ */
+- RESTORE_ALL
++ RESTORE_REST
+ UNFAKE_STACK_FRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(kernel_thread)
++ENDPROC(kernel_thread)
+
+ ENTRY(child_rip)
+ pushq $0 # fake return address
+@@ -1208,13 +1576,14 @@ ENTRY(child_rip)
+ */
+ movq %rdi, %rax
+ movq %rsi, %rdi
++ pax_force_fptr %rax
+ call *%rax
+ # exit
+ mov %eax, %edi
+ call do_exit
+ ud2 # padding for call trace
+ CFI_ENDPROC
+-END(child_rip)
++ENDPROC(child_rip)
+
+ /*
+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
+@@ -1241,11 +1610,11 @@ ENTRY(kernel_execve)
+ RESTORE_REST
+ testq %rax,%rax
+ je int_ret_from_sys_call
+- RESTORE_ARGS
+ UNFAKE_STACK_FRAME
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(kernel_execve)
++ENDPROC(kernel_execve)
+
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+@@ -1263,9 +1632,10 @@ ENTRY(call_softirq)
+ CFI_DEF_CFA_REGISTER rsp
+ CFI_ADJUST_CFA_OFFSET -8
+ decl PER_CPU_VAR(irq_count)
++ pax_force_retaddr
+ ret
+ CFI_ENDPROC
+-END(call_softirq)
++ENDPROC(call_softirq)
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+@@ -1303,7 +1673,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
+ decl PER_CPU_VAR(irq_count)
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_do_hypervisor_callback)
++ENDPROC(xen_do_hypervisor_callback)
+
+ /*
+ * Hypervisor uses this for application faults while it executes.
+@@ -1362,7 +1732,7 @@ ENTRY(xen_failsafe_callback)
+ SAVE_ALL
+ jmp error_exit
+ CFI_ENDPROC
+-END(xen_failsafe_callback)
++ENDPROC(xen_failsafe_callback)
+
+ #endif /* CONFIG_XEN */
+
+@@ -1405,16 +1775,31 @@ ENTRY(paranoid_exit)
+ TRACE_IRQS_OFF
+ testl %ebx,%ebx /* swapgs needed? */
+ jnz paranoid_restore
+- testl $3,CS(%rsp)
++ testb $3,CS(%rsp)
+ jnz paranoid_userspace
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel
++ TRACE_IRQS_IRETQ 0
++ SWAPGS_UNSAFE_STACK
++ RESTORE_ALL 8
++ pax_force_retaddr_bts
++ jmp irq_return
++#endif
+ paranoid_swapgs:
++#ifdef CONFIG_PAX_MEMORY_UDEREF
++ pax_exit_kernel_user
++#else
++ pax_exit_kernel
++#endif
+ TRACE_IRQS_IRETQ 0
+ SWAPGS_UNSAFE_STACK
+ RESTORE_ALL 8
+ jmp irq_return
+ paranoid_restore:
++ pax_exit_kernel
+ TRACE_IRQS_IRETQ 0
+ RESTORE_ALL 8
++ pax_force_retaddr_bts
+ jmp irq_return
+ paranoid_userspace:
+ GET_THREAD_INFO(%rcx)
+@@ -1443,7 +1828,7 @@ paranoid_schedule:
+ TRACE_IRQS_OFF
+ jmp paranoid_userspace
+ CFI_ENDPROC
+-END(paranoid_exit)